/*	$NetBSD: locore.s,v 1.48.6.2 2001/11/18 19:39:04 scw Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
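| A rough decode of the TT0 value above (our annotation; see the 68030
| manual for the authoritative field layout):
|   0x003F8107 = logical base 0x00 with address mask 0x3F, matching
|   VA 0x00000000-0x3FFFFFFF (the low 1GB); E=1 (enable); RWM=1
|   (match both reads and writes); FC base 0 with FC mask 7
|   (match all function codes).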
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 32 megabytes of memory starting at 0x0 into the
| entry which maps the 32 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address
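| Why 0x3e0 (our annotation, derived from the code above): the level A
| table holds 8-byte long-format descriptors, each mapping a 32MB range.
| Assuming the usual sun3x KERNBASE of 0xF8000000, the descriptor index
| is 0xF8000000 >> 25 = 124, and 124 * 8 = 0x3e0.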

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for
| the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get lwp0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in lwp0.l_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
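| Stack picture after the pushes below (our annotation), from low
| addresses (at %sp/%a1) to high:
|	tf_regs[16] (64 bytes), tf_stackadj, tf_sr, tf_pc,
|	tf_format/tf_vector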
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%sp,%a1			| a1=trapframe
	lea	_C_LABEL(lwp0),%a0	| lwp0.l_md.md_regs =
	movl	%a1,%a0@(L_MD_REGS)	|   trapframe
	movl	%a2,%a1@(FR_SP)		| a2 == usp (from above)
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,%sp			| help DDB backtrace
	trap	#15			| should not get here

| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
GLOBAL(proc_do_uret)
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,%sp			| pop SSP and stack adjust count
	rte

/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8: switchframe from before cpu_set_kpc
 * SP+4: void *arg;
 * SP:   u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	%sp@+,%a0		| function pointer
	jbsr	%a0@			| (*func)(arg)
	addql	#4,%sp			| toss the arg
	rts				| as cpu_switch would do

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|   longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		|   for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		|   for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use saved PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	moveq	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	moveq	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
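	| Note (our annotation): pmove stored the 16-bit MMU status
	| register, and on this big-endian CPU the movb above fetches
	| its upper byte, so bit n tested below is MMUSR bit n+8
	| (e.g. bit 7 here is the bus-error bit, bit 2 the invalid bit).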
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup

/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
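| For illustration only, a sketch of a user-mode caller following the
| register convention documented above (our annotation, not a public
| interface; command/addr/length stand for caller-supplied values):
|	movl	#command,%d0
|	movl	addr,%a1
|	movl	length,%d1
|	trap	#12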

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|   from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		|   ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		|   ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
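
/*
 * Note on the moveml masks (our annotation): with predecrement
 * addressing the mask names %d0..%d7/%a0..%a7 from bit 15 down to
 * bit 0, and with postincrement from bit 0 up, so 0xC0C0 and 0x0303
 * both select the caller-trashable set %d0/%d1/%a0/%a1.  By the same
 * rule, the 0xFFFF/0x7FFF pairs used elsewhere in this file save all
 * sixteen registers but restore everything except %sp (%a7).
 */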

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|   the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|   + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|   8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>


/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	%sp@(4),%a0
	pflush	#0,#0,%a0@		| flush address from both sides
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip data cache
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts
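
/*
 * Note on the pflush operands above (our annotation): the first
 * operand is a function code, the second a mask applied to it.
 * With a mask of #4 only FC bit 2 (the supervisor bit) is examined,
 * so "pflush #4,#4" matches supervisor entries and "pflush #0,#4"
 * matches user entries; a mask of #0 with an address operand (as in
 * TBIS) matches that VA in every address space.
 */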

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts

ENTRY(DCIS)
__DCIS:
	rts

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

ENTRY(PCIA)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get the caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee-saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts
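
/*
 * Operand decode for the ptestr above (our annotation): function
 * code #5 selects supervisor data space, level #7 asks for a full
 * search of all table levels, and %a0 receives the address of the
 * last descriptor fetched, i.e. the PTE for the VA in %a1.
 */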

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
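
/*
 * In effect (our annotation): _splraise returns the old %sr in %d0
 * and only writes the requested value to %sr when the old IPL bits
 * (%sr & PSL_HIGHIPL) are below it, so it can raise but never lower
 * the priority; _spl above writes the new value unconditionally.
 */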

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

/*
 * Align the branch target of the loop to a half-line (8-byte)
 * boundary to minimize cache effects.  This guarantees both
 * that there will be no prefetch stalls due to cache line burst
 * operations and that the loop will run from a single cache
 * half-line.
 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts
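
| Worked example (our annotation): a caller wanting roughly a 100
| microsecond spin passes 100 << 8 = 25600; with a hypothetical
| delay_divisor of 82 the subl/jgt loop runs ceil(25600 / 82) = 313
| times before falling through to the rts.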

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

| The end!