/*	$NetBSD: locore.s,v 1.52 2003/08/07 16:29:59 agc Exp $	*/

/*
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */
/*
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
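| The hand-assembled .long above is the bracketed pmove, spelled out for
| assemblers that do not know the tt0 register.  Decoding 0x3F8107 per the
| 68030 TT register layout (from the CPU manual, not anything in this file):
| address base 0x00, address mask 0x3F (so the region is 0x00000000-0x3FFFFFFF,
| the low 1GB), E=1 (enabled), reads and writes both matched, and FC base 0
| with FC mask 7 (all function codes).
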
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for
| the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get lwp0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
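| The pushes below build, from the final stack pointer upward: tf_regs[16]
| (the 64 bytes reserved by the lea), tf_stackadj, tf_sr = PSL_USER, tf_pc
| (zero for now) and the format/vector word.  Since cpu_fork() only copies
| this frame when creating new processes, it needs to look sane but does not
| have to describe a real exception.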
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	lea	_C_LABEL(lwp0),%a0	| proc0.p_md.md_regs =
	movl	%sp,%a0@(L_MD_REGS)	|   trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")

/*
 * proc_trampoline: call function in register %a2 with %a3 as an arg
 * and then rei.
 */
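| When a newly forked lwp runs for the first time, the switch code returns
| here with %a2/%a3 loaded from its saved context; after the call, the
| trapframe the fork code left on the stack (cf. the fake frame built before
| main() above) is unwound and the rei path drops into user mode.  The
| 0x7FFF moveml mask restores d0-d7/a0-a6, i.e. everything but the stack
| pointer.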
GLOBAL(proc_trampoline)
	movl	%a3,%sp@-		| push function arg
	jbsr	%a2@			| call function
	addql	#4,%sp			| pop arg
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	jra	_ASM_LABEL(rei)		| and return

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

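| A note on the special status word (SSW) used below: in the 68030 bus-error
| frame it lives at offset 10, and (per the CPU manual, not this file)
| DF (bit 8) means the fault was on a data cycle, RB/RC (bits 12/13) are the
| re-run flags for instruction-pipe stages B/C, and FB/FC (bits 14/15) mark
| a fault in stage B/C.  When RB/RC are found set, FB/FC must be set so the
| pipe stages are re-fetched when the frame is RTE'd.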
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	| longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we dont separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
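	| Note: the movb above fetched the high-order byte of the 16-bit MMU
	| PSR, so in %d1 bit 2 is the PSR I (invalid) bit, bit 3 is W (write
	| protected) and bit 7 is B (bus error) -- which is what the tests
	| below look at.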
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		| the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup

/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
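| The syscall number arrives in %d0 and the arguments are on the user stack;
| syscall() fetches them from user space.  The full frame (including the user
| SP) is saved and restored here because syscall() writes the return values
| back into the saved %d0/%d1 and may adjust the saved PC (e.g. for a
| restarted system call).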
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
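
| These masks name only the caller-trashed registers d0/d1/a0/a1: a moveml
| mask is bit-reversed for the predecrement form, so 0xC0C0 pushes
| d0/d1/a0/a1 and 0x0303 pops the same four on the way out.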

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>


/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
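| ptestr here takes a function code, an address, a search level and an
| address register: FC #5 is supervisor data space, level #7 walks the
| full table, and %a0 receives the physical address of the last descriptor
| fetched -- i.e. the PTE mapping the VA.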
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */
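| These take and return the full 16-bit SR value (PSL_S plus the IPL field),
| not a bare interrupt level, and _splraise() only ever raises the mask;
| lowering is done with _spl().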

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%a0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
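| Callers normally reach this through the port's delay() macro, which is
| expected to be roughly "#define delay(us) _delay((us) << 8)" -- that is
| what supplies the N/256 scaling mentioned above.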
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

| The end!