/*	$NetBSD: subr.S,v 1.36 2019/04/06 03:06:28 thorpej Exp $	*/

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
#define SCBENTRY(name) \
	.text			; \
	.align 2		; \
	.globl __CONCAT(X,name)	; \
__CONCAT(X,name):

	.text

#ifdef KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky.  The kernel is not loaded at the correct
 * address, so it must be relocated: the copy routine below is first
 * moved out of the way, then it copies the kernel image to its correct
 * address and jumps back into the relocated kernel.
 */
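/*
 * In rough C pseudocode (hypothetical names, illustration only), the
 * code below amounts to:
 *
 *	memcpy(scratch, cps, cpe - cps);	// save the copier itself
 *	// ...and then, running from the copy:
 *	memcpy(dst, kernel, edata - start);	// slide the kernel image
 *	memset(bss, 0, end - edata);		// zero out the bss
 *	rei();					// resume at the relocated "2:"
 */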
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						# save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	# save number of symtab
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						# save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,%r1			#
	movl	%r1,_C_LABEL(lwp0)+L_PCB	# lwp0 pcb, XXXuvm_lwp_getuarea
	bicl3	$0x80000000,%r1,%r0		# get phys lwp0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,%r1,%r0			# Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(lwp0)+L_PCB,%r0	# get PCB virtual address
	mfpr	$PR_PCBB,PCB_PADDR(%r0)		# save PCB physical address
	movab	PCB_ONFAULT(%r0),ESP(%r0)	# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	%r1,%r0				# get lwp0 pcb
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	PCB_ONFAULT(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */
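/*
 * What the trampoline below does, as a hedged C sketch (the frame it
 * runs in is built by the kernel's sendsig; illustration only):
 *
 *	(*handler)(sig, code, scp);	// the calls $3,(%r0)
 *	sigreturn14(scp);		// chmk: restore the old context
 *	exit(code);			// only reached if sigreturn fails
 *	// halt: traps if even exit returns
 */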

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):
	pushr	$0x3f
	.word	0x9f16			# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0			# the callback interrupt routine
	.long	0			# its argument
	.long	0			# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):

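/*
 * Each device interrupt vector gets its own copy of the idsptch
 * template above; the jsb leaves the return address on the stack, and
 * cmn_idsptch uses it as a pointer to the three longwords following
 * the jsb.  As a hedged C sketch (hypothetical struct name,
 * illustration only):
 *
 *	struct idspvec {
 *		void	(*routine)(void *);	// interrupt callback
 *		void	*arg;			// its argument
 *		struct evcnt *ev;		// statistics, may be NULL
 *	};
 */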
_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)
#endif
	movl	(%sp)+,%r0		# get pointer to idspvec
	mtpr	$IPL_VM,$PR_IPL		# Make sure we are at IPL_VM
	movl	8(%r0),%r1		# get evcnt pointer
	beql	1f			# no ptr, skip increment
	incl	EV_COUNT(%r1)		# increment low longword
	adwc	$0,EV_COUNT+4(%r1)	# add any carry to hi longword
1:	mfpr	$PR_SSP, %r2		# get curlwp
	movl	L_CPU(%r2), %r2		# get curcpu
	incl	CI_NINTR(%r2)		# increment ci_data.cpu_nintr
	adwc	$0,(CI_NINTR+4)(%r2)
#if 0
	pushl	%r0
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)			# push argument
	calls	$1,*(%r0)		# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)
#endif
	popr	$0x3f			# pop registers
	rei				# return from interrupt
#if 0
2:	.asciz	"intr %p(%p)\n"
#endif

ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0		# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2		# First argument, the address
	movl	8(%ap),%r1		# Sec arg, b,w,l
	pushl	%r0			# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4		# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b			# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3			# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret
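
/*
 * At the C level (hedged sketch, hypothetical names, illustration
 * only):
 *
 *	int
 *	badaddr(void *addr, int size)	// size is 1, 2 or 4
 *	{
 *		int s = splhigh();
 *		int bad = 0;
 *		memtest = handler_4;	// the "4:" path, entered on a
 *					// machine check, sets bad = 1
 *		(void)probe(addr, size);// the movb/movw/movl above
 *		splx(s);
 *		return bad;
 *	}
 */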

#ifdef DDB
/*
 * DDB is the only user of setjmp/longjmp.
 */
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp),12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif
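
/*
 * The four-longword jmp_buf these keep is, as a hedged C sketch
 * (illustration only):
 *
 *	struct { int ap, fp, pc, sp; };
 *
 * setjmp() captures them from its own call frame and returns 0;
 * longjmp() reloads them and jumps to the saved pc, with its second
 * argument left in %r0 as the return value.
 */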

#if defined(MULTIPROCESSOR)
	.align	2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif

	.globl	softint_cleanup,softint_exit,softint_process
	.type	softint_cleanup@function
	.type	softint_exit@function
	.type	softint_process@function
softint_cleanup:
	movl	L_CPU(%r0),%r1		/* get cpu_info */
	incl	CI_MTX_COUNT(%r1)	/* increment mutex count */
	clrl	L_CTXSWTCH(%r0)		/* clear l_ctxswtch of old lwp */
	movl	L_PCB(%r0),%r1		/* get PCB of softint LWP */
softint_exit:
	popr	$0x3			/* restore r0 and r1 */
	rei				/* return from interrupt */

softint_process:
	/*
	 * R6 contains pinned LWP
	 * R7 contains ipl to dispatch with
	 */
	movq	%r6,-(%sp)		/* push old lwp and ipl onto stack */
	calls	$2,_C_LABEL(softint_dispatch) /* dispatch it */

	/* We can use any register because ldpctx will overwrite them */
	movl	L_PCB(%r6),%r3		/* get pcb */
	movab	softint_exit,PCB_PC(%r3) /* do a quick exit */
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r6),%r8
	movl	%r6,CI_CURLWP(%r8)
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
	ldpctx				/* implicitly updates curlwp */
	rei


softint_common:
	mfpr	$PR_IPL,%r1
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	movpsl	-(%sp)			/* add cleanup hook */
	pushab	softint_cleanup
	svpctx

	/* We can use any register because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r6		/* Get curlwp */
	movl	L_CPU(%r6),%r8		/* get cpu_info */
	movl	CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
	movl	L_PCB(%r2),%r3		/* Get pointer to its pcb. */
	movl	%r6,PCB_R6(%r3)		/* move old lwp into new pcb */
	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
#ifdef MULTIPROCESSOR
	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
#endif

	/*
	 * Now reset the PCB since we have no idea what state it was
	 * last left in.
	 */
	movab	(USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r3),%r0
					/* calculate where KSP should be */
	movl	%r0,KSP(%r3)		/* save it as SP */
	movl	%r0,PCB_FP(%r3)		/* and as the FP too */
	movab	CA_ARGNO(%r0),PCB_AP(%r3) /* update the AP as well */
	movab	softint_process,PCB_PC(%r3) /* and where we will start */
	movl	$PSL_HIGHIPL,PCB_PSL(%r3) /* Needs to be running at IPL_HIGH */

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* set PA of new pcb */
	ldpctx				/* load it */
	rei				/* get off interrupt stack */
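
/*
 * A hedged C-level sketch of what softint_common arranges
 * (hypothetical names, illustration only):
 *
 *	ci = curcpu();
 *	softlwp = ci->ci_softlwps[level];	// level came in via %r0
 *	softlwp->pcb->r6 = curlwp;		// the pinned lwp
 *	softlwp->pcb->r7 = old_ipl;
 *	switch_to(softlwp);	// starts in softint_process, which calls
 *				// softint_dispatch(pinned, ipl) and then
 *				// switches straight back via softint_exit
 */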

SCBENTRY(softclock)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_CLOCK,%r0
	brb	softint_common

SCBENTRY(softbio)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_BIO,%r0
	brb	softint_common

SCBENTRY(softnet)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_NET,%r0
	brb	softint_common

SCBENTRY(softserial)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_SERIAL,%r0
	brb	softint_common

/*
 * Helper routine for cpu_lwp_fork.  It gets invoked by Swtchto.
 * It lets the kernel know the lwp is alive, then calls func(arg)
 * and possibly returns to sret.
 */
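/*
 * I.e., as a hedged C sketch (illustration only):
 *
 *	lwp_startup(prev, new);		// announce the new lwp
 *	(*func)(arg);			// then run its entry point
 */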
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)		/* save func & arg */
	movq	%r0,-(%sp)		/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup) /* startup the lwp */
	movl	(%sp)+,%r0		/* grab func */
	calls	$1,(%r0)		/* call it with arg */
	ret

/*
 * r1 = newlwp
 * r0 = oldlwp
 */
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any register because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	movl	L_PCB(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r0), %r8		/* get cpu_info of old lwp */
	movl	%r8, L_CPU(%r1)		/* update cpu_info of new lwp */
	movl	%r1,CI_CURLWP(%r8)	/* update ci_curlwp */
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */
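
/*
 * A hedged C-level view of Swtchto (hypothetical names, illustration
 * only) -- it is the machine half of cpu_switchto():
 *
 *	save_context(oldl);		// svpctx
 *	newl->l_pcb->r0 = oldl;		// oldl becomes the return value
 *	curcpu()->ci_curlwp = newl;	// MULTIPROCESSOR only
 *	load_context(newl);		// ldpctx + rei resumes newl
 */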

#
# copy/fetch/store routines.
#

ENTRY(copyout, 0)
	movl	8(%ap),%r3
	blss	3f			# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f			# kernel space
	movl	8(%ap),%r3
2:	mfpr	$PR_ESP,%r2
	movab	1f,(%r2)		# set pcb_onfault
4:	tstw	14(%ap)			# check if >= 64K
	bneq	5f
	movc3	12(%ap),(%r1),(%r3)
	clrl	%r0
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)			# clear pcb_onfault
	ret
5:	movc3	$0xfffc,(%r1),(%r3)
	subl2	$0xfffc,12(%ap)
	brb	4b

3:	movl	$EFAULT,%r0
	ret
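
/*
 * In C terms these are the standard copy(9) primitives:
 *
 *	int copyin(const void *uaddr, void *kaddr, size_t len);
 *	int copyout(const void *kaddr, void *uaddr, size_t len);
 *
 * Both return 0 on success or EFAULT; a fault while copying unwinds
 * through pcb_onfault to label 1.  movc3 takes a 16-bit count, hence
 * the 0xfffc-byte chunking loop for copies of 64K and more.
 */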

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)		# save current pcb_onfault
	movab	1f,(%r3)		# set pcb_onfault
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1),(%r2)
	clrl	%r0
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)		# restore pcb_onfault
	ret
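
/*
 * kcopy(9) -- int kcopy(const void *src, void *dst, size_t len) --
 * is a memcpy() that survives faults on either address, returning the
 * error the fault handler left in %r0 instead of panicking.  The old
 * pcb_onfault is saved and restored so it can nest inside
 * copyin()/copyout().
 */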

/*
 * copy{in,out}str() copies data from/to user space to/from kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
ENTRY(copyinstr, 0)
	tstl	4(%ap)			# kernel address?
	bgeq	8f			# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)			# kernel address?
	bgeq	8f			# no, continue
	brb	6b			# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(%ap),%r5		# from
	movl	8(%ap),%r4		# to
	movl	12(%ap),%r3		# len
	movl	16(%ap),%r2		# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	2f,(%r1)		# set pcb_onfault

	tstl	%r3			# any chars to copy?
	bneq	1f			# yes, jump for more
0:	tstl	%r2			# save copied len?
	beql	2f			# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	mfpr	$PR_ESP,%r1
	clrl	(%r1)			# clear pcb_onfault
	ret

1:	movb	(%r5)+,(%r4)+		# copy one char
	beql	0b			# jmp if last char
	sobgtr	%r3,1b			# copy one more
	movl	$ENAMETOOLONG,%r0	# inform about too long string
	brb	0b			# out of chars
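
/*
 * C prototypes for the above (see copystr(9)):
 *
 *	int copyinstr(const void *uaddr, void *kaddr, size_t len,
 *	    size_t *done);
 *	int copyoutstr(const void *kaddr, void *uaddr, size_t len,
 *	    size_t *done);
 *
 * The shared tail at label 8 copies at most len bytes including the
 * terminating NUL, stores the copied length through done when it is
 * non-NULL, and returns 0, EFAULT or ENAMETOOLONG.
 */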

/**************************************************************************/

	.align	2

#define UFETCHSTORE_PROLOGUE \
	tstl	4(%ap)		/* uaddr in userspace? */	;\
	blss	1f		/* nope, fault */		;\
	mfpr	$PR_ESP,%r1	/* &pcb_onfault is in ESP */	;\
	movab	2f,(%r1)	/* set pcb_onfault */

#define UFETCHSTORE_EPILOGUE \
	mfpr	$PR_ESP,%r1	/* &pcb_onfault is in ESP */	;\
	clrl	(%r1)		/* pcb_onfault = NULL */

#define UFETCHSTORE_RETURN \
	clrl	%r0		/* return success */		;\
	ret							;\
1:	movl	$EFAULT,%r0					;\
	ret			/* return EFAULT */		;\
2:	UFETCHSTORE_EPILOGUE					;\
	ret			/* error already in %r0 */
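
/*
 * Each ufetch/ustore primitive below thus expands to, roughly (hedged
 * C sketch, hypothetical names, illustration only):
 *
 *	if ((intptr_t)uaddr < 0)	// i.e. at or above KERNBASE
 *		return EFAULT;
 *	pcb_onfault = handler_2;	// the disarm-and-return path "2:"
 *	*valp = *uaddr;			// or *uaddr = val for the stores
 *	pcb_onfault = NULL;
 *	return 0;
 */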

/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */
ENTRY(_ufetch_8,0)
	UFETCHSTORE_PROLOGUE
	movb	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */
ENTRY(_ufetch_16,0)
	UFETCHSTORE_PROLOGUE
	movw	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */
ENTRY(_ufetch_32,0)
	UFETCHSTORE_PROLOGUE
	movl	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */
ENTRY(_ustore_8,0)
	UFETCHSTORE_PROLOGUE
	movb	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */
ENTRY(_ustore_16,0)
	UFETCHSTORE_PROLOGUE
	movw	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */
ENTRY(_ustore_32,0)
	UFETCHSTORE_PROLOGUE
	movl	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/**************************************************************************/

	.align	2

JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb
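
/*
 * These are simple spin-lock primitives built on the interlocked
 * branch-on-bit instructions; as a hedged C sketch (hypothetical
 * helper names, illustration only):
 *
 *	void Slock(volatile long *l)	{ while (test_and_set(l, 0)) ; }
 *	int  Slocktry(volatile long *l)	{ return test_and_set(l, 0) == 0; }
 *	void Sunlock(volatile long *l)	{ clear_bit(l, 0); }
 *
 * bbssi/bbcci perform the bit test and the set/clear as one
 * interlocked operation, so they are safe against other CPUs as well.
 */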

#
# data department
#
	.data

	.globl	_C_LABEL(memtest)
_C_LABEL(memtest):			# memory test in progress
	.long	0