/*	$NetBSD: subr.S,v 1.17 2007/05/17 14:51:34 yamt Exp $ */

/*
 * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Lule}.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
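/*
 * JSBENTRY defines a global, aligned entry point that is entered with
 * jsb and left with rsb or rei, i.e. without the call frame that the
 * calls-based ENTRY points get.
 */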

	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at its correct
 * address, so it must first be copied into place: the small copy
 * routine below is itself copied to a safe place first, and execution
 * then jumps back into the relocated kernel.
 */
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
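/*
 * The boot program passes in how-to flags, memory size and symbol table
 * information (in registers for old boots, as an argument list for new
 * ones).  The code below records the symbol table bounds, sets up
 * proc0's u-area and kernel stack, records where the system page table
 * (Sysmap) starts, and finally calls the C entry point _start().
 */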
JSBENTRY(start)
	brb	1b				# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						# save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	# save number of symtab
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						# save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr) # save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0	# get PCB virtual address
	movab	IFTRAP(%r0),4(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */
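/*
 * The code between sigcode and esigcode is a signal trampoline that is
 * intended to be copied out to user space: it calls the signal handler
 * and then re-enters the kernel with sigreturn(2) via chmk; if that
 * fails it calls exit(2), and the halt should never be reached.
 */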

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_IBCS2
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

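/*
 * idsptch/eidsptch delimit a template for device interrupt dispatch.
 * The .long slots after the jsb are meant to be filled in (by code
 * outside this file) with the handler, its argument and a pointer to
 * the matching evcnt; the jsb pushes the address of those slots, and
 * cmn_idsptch pops it to find what to count and what to call.
 */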
	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):
	pushr	$0x3f
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):

_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)
#endif
	movl	(%sp)+,%r0		# get pointer to idspvec
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	incl	_C_LABEL(uvmexp)+UVME_INTRS	# increment uvmexp.intrs
#if 0
	pushl	%r0
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)
#endif
	popr	$0x3f			# pop registers
	rei				# return from interrupt
#if 0
2:	.asciz	"intr %p(%p)\n"
#endif

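/*
 * badaddr() probes an address at byte/word/longword width with the IPL
 * raised and a machine-check recovery hook installed through memtest.
 * It returns nonzero if the probe caused a machine check (the address
 * is bad) and zero otherwise.  A minimal usage sketch from C, assuming
 * the usual prototype (not declared in this file):
 *
 *	extern int badaddr(void *addr, int size);
 *
 *	if (badaddr((void *)csr, 2))
 *		printf("nothing at that csr\n");
 */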
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Sec arg, b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

#ifdef DDB
/*
 * DDB is the only code in the kernel that uses setjmp/longjmp.
 */
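/*
 * These are minimal kernel versions: setjmp() stashes the caller's AP,
 * FP, return PC and a stack pointer value in the four-longword buffer
 * and returns 0; longjmp() restores them and resumes at the saved PC
 * with its second argument as the apparent return value.  A sketch of
 * the intended use, assuming the usual label_t buffer type (not
 * defined in this file):
 *
 *	label_t jbuf;
 *
 *	if (setjmp(&jbuf) == 0) {
 *		... code that may bail out with longjmp(&jbuf, 1) ...
 *	} else {
 *		... resumed here after the longjmp ...
 *	}
 */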
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word 0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp),12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word 0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif

/*
 * Helper routine for cpu_lwp_fork.  It gets invoked by Swtchto.
 * It lets the kernel know the lwp is alive and then calls func(arg)
 * and possibly returns to sret.
 */
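/*
 * On entry the registers have been set up by cpu_lwp_fork/Swtchto:
 * %r0 = previous lwp, %r1 = the new lwp, %r2 = func, %r3 = arg.
 * lwp_startup(oldlwp, newlwp) is called first, then func(arg).
 */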
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)		/* save func & arg */
	movq	%r0,-(%sp)		/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup) /* startup the lwp */
	movl	(%sp)+,%r0		/* grab func */
	calls	$1,(%r0)		/* call it with arg */
	ret

/*
 * r1 = newlwp
 * r0 = oldlwp
 */
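/*
 * Swtchto is the low-level context switch: it saves the outgoing
 * context with svpctx (leaving us on the interrupt stack), records the
 * new lwp in ci_curlwp, points PR_ESP at the new PCB's fault-recovery
 * slot used by the copy* routines, looks up the physical address of
 * the new PCB through Sysmap, loads it with ldpctx and drops into the
 * new context with rei.
 */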
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any register because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	mfpr	$PR_SSP,%r4		# Get curcpu
	movl	%r1,CI_CURLWP(%r4)	# update ci_curlwp
	movl	L_ADDR(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
	movl	%r4,SSP(%r3)		# Put curcpu into new PCB
	addl3	%r3,$IFTRAP,%r4		# Save for copy* functions.
	mtpr	%r4,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r4,ESP(%r3)		# Must save in PCB also.

	extzv	$9,$21,%r3,%r1		# extract offset of PCB
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r4

	mtpr	%r4,$PR_PCBB		# set PA of new pcb
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */

#
# copy/fetch/store routines.
#

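/*
 * copyin()/copyout() move data between user and kernel space and
 * kcopy() copies within kernel space; all three return 0 on success
 * and a nonzero error if the access faults or the user address is out
 * of range, using the recovery pointer reached through PR_ESP.
 * Assumed C prototypes (not declared in this file):
 *
 *	int copyin(const void *uaddr, void *kaddr, size_t len);
 *	int copyout(const void *kaddr, void *uaddr, size_t len);
 *	int kcopy(const void *src, void *dst, size_t len);
 */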
ENTRY(copyout, 0)
	movl	8(%ap),%r2
	blss	3f			# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f			# kernel space
	movl	8(%ap),%r2
2:	mfpr	$PR_ESP,%r3
	movab	1f,(%r3)
	movc3	12(%ap),(%r1),(%r2)
1:	mfpr	$PR_ESP,%r3
	clrl	(%r3)
	ret

3:	mnegl	$1,%r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)
	movab	1f,(%r3)
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1),(%r2)
	clrl	%r1
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)
	movl	%r1,%r0
	ret

/*
 * copy{in,out}str() copy a NUL-terminated string between user space
 * and kernel space.
 * Security checks:
 *	1) the user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
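/*
 * Both return 0 on success, EFAULT for a kernel address in the user
 * slot and ENAMETOOLONG when the string does not fit in len bytes; the
 * number of bytes copied is stored through the last argument when it
 * is non-NULL.  Assumed prototypes (not declared in this file):
 *
 *	int copyinstr(const void *uaddr, void *kaddr, size_t len,
 *	    size_t *done);
 *	int copyoutstr(const void *kaddr, void *uaddr, size_t len,
 *	    size_t *done);
 */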
ENTRY(copyinstr, 0)
	tstl	4(%ap)			# kernel address?
	bgeq	8f			# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)			# kernel address?
	bgeq	8f			# no, continue
	brb	6b			# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(%ap),%r5		# from
	movl	8(%ap),%r4		# to
	movl	12(%ap),%r3		# len
	movl	16(%ap),%r2		# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	3f,(%r1)

	tstl	%r3			# any chars to copy?
	bneq	1f			# yes, jump for more
0:	tstl	%r2			# save copied len?
	beql	2f			# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+		# copy one char
	beql	0b			# jmp if last char
	sobgtr	%r3,1b			# copy one more
	movl	$ENAMETOOLONG,%r0	# inform about too long string
	brb	0b			# out of chars

3:	mfpr	$PR_ESP,%r1
	clrl	(%r1)
	brb	0b

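/*
 * subyte()/suword()/suswintr() store a byte/longword/word at a user
 * address and fusword()/fuswintr() fetch a word from one.  All of them
 * install a recovery address through PR_ESP; the store routines return
 * 0 on success and nonzero (-1 for a kernel-space address) on failure,
 * and the fetch routines return the zero-extended word or -1 for a
 * kernel-space address.  Assumed prototypes (not declared in this
 * file):
 *
 *	int subyte(void *uaddr, int value);
 *	int suword(void *uaddr, long value);
 *	int suswintr(void *uaddr, short value);
 *	int fusword(void *uaddr);
 *	int fuswintr(void *uaddr);
 */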
ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movb	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movl	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f			# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movw	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

3:	mnegl	$1,%r0
	ret

	.align	2
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movzwl	(%r0),%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

#if defined(MULTIPROCESSOR) || defined(DEBUG) || defined(DIAGNOSTIC) || defined(LOCKDEBUG)

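/*
 * Simple spinlock primitives built on the interlocked bit instructions,
 * entered via jsb with the lock address in %r1: Slock spins until it
 * manages to set bit 0 with bbssi, Slocktry returns 1 in %r0 if it got
 * the lock and 0 if it was already held, and Sunlock clears the bit
 * with bbcci.
 */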
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

#endif

#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):			# memory test in progress
	.long	0

#ifdef __ELF__
	.section .rodata
#endif
noque:	.asciz	"swtch"