/*	$NetBSD: locore.s,v 1.45 2001/02/22 07:11:12 chs Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
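| Decoding that TT0 value, per the 68030 TT register layout (for
| reference): 0x3F8107 = logical base 0x00, logical mask 0x3F (so the
| compare covers 0x00000000-0x3FFFFFFF, i.e. the low 1GB), the enable
| bit, RWM set so reads and writes both match, and an FC mask of 7 so
| any function code matches.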
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address
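| (The 0x3e0 arithmetic, assuming 8-byte long-format descriptors in
| the monitor's A table: 0x3e0/8 = entry 124, and 124 * 32MB =
| 0xF8000000 = KERNBASE, so %a1 ends up pointing at the slot that
| maps the kernel's high address.)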

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for the
| low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp
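| (This pmove, like the tt0 load above, is hand-assembled as a .long
| constant, presumably because the assembler in use did not accept
| the tt0 operand; the adjacent comments give the intent.)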

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in p->p_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
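| Assuming the usual m68k struct frame layout, the stack now holds a
| complete trapframe:
|	sp+0:	tf_regs[16]	(%d0-%d7/%a0-%a7, 64 bytes)
|	sp+64:	tf_stackadj
|	sp+68:	tf_sr
|	sp+70:	tf_pc
|	sp+74:	tf_format/tf_vector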
	movl	%sp,%a1			| a1=trapframe
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|  trapframe
	movl	%a2,%a1@(FR_SP)		| a2 == usp (from above)
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,%sp			| help DDB backtrace
	trap	#15			| should not get here

| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
GLOBAL(proc_do_uret)
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,%sp			| pop SSP and stack adjust count
	rte

/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8:	switchframe from before cpu_set_kpc
 * SP+4:	void *arg;
 * SP:		u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	%sp@+,%a0		| function pointer
	jbsr	%a0@			| (*func)(arg)
	addql	#4,%sp			| toss the arg
	rts				| as cpu_switch would do
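| Roughly, in C: func = pop(); (*func)(arg); pop the arg; return,
| where the return consumes the switchframe left on the stack just as
| a return from cpu_switch() would.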

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
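| The RB/RC-to-FB/FC conversion below marks the pending stage B/C bus
| cycles as faults, so that (the idea is) the eventual rte will
| re-fetch those pipe stages instead of rerunning the stalled cycles
| once the fault has been serviced.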
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
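| (%d1 holds the high byte of the 16-bit MMU PSR here, so assuming
| the 68030 PSR layout the bits tested below are PSR bit 10 (I),
| bit 15 (B) and bit 11 (WP) respectively.)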
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup

/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

| Check PSW and see what happened.
|	T=0 S=0	(should not happen)
|	T=1 S=0	trace trap from user mode
|	T=0 S=1	trace trap on a trap instruction
|	T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
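/*
 * Both masks name the same registers, %d0-%d1/%a0-%a1: moveml's mask
 * bit order is reversed for the predecrement (push) mode, so bits
 * 15,14,7,6 (0xC0C0) going in correspond to bits 0,1,8,9 (0x0303)
 * coming back out.
 */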

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
	.align	2
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
	.align	2
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
	.align	2
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)
	.text

/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 *
 * MUST BE CALLED AT SPLHIGH!
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)

/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * cpu_switch()
 * Hacked for sun3
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movw	%sr,%a1@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	movl	%d0,%d1
	negl	%d0
	andl	%d1,%d0
	bfffo	%d0{#0:#32},%d1
	eorib	#31,%d1
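	/*
	 * The four instructions above compute ffs(whichqs)-1:
	 * d0 & -d0 isolates the lowest set bit, bfffo finds its
	 * offset from the MSB, and eor #31 turns that into a
	 * conventional bit number.  E.g. whichqs = 0x12: the
	 * isolated bit is 0x02, bfffo gives 30, 30^31 = 1, so
	 * queue 1 is chosen.
	 */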

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1		| XXX - Make this work!
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
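					| (0xFCFC in this non-reversed
					| mask order is %d2-%d7/%a2-%a7,
					| everything the ABI makes us
					| preserve)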
	movl	%usp,%a2		| grab USP (a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
	fsave	%a2@			| save FP state
	tstb	%a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	%fp0-%fp7,%a2@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  %a0=curproc, %a1=curpcb
	 */

	clrl	%a0@(P_BACK)		| clear back link
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	%a0@(P_VMSPACE),%a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	%a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#ifdef PMAP_DEBUG
	/* When debugging just call _pmap_switch(). */
	movl	%a2@(VM_PMAP),%a2	| pmap = vm->vm_map.pmap
	pea	%a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr
#else
	/* Otherwise, use this inline version. */
	lea	_C_LABEL(kernel_crp),%a3 | our CPU Root Ptr. (CRP)
	movl	%a2@(VM_PMAP),%a2	| pmap = vm->vm_map.pmap
	movl	%a2@(PM_A_PHYS),%d0	| phys = pmap->pm_a_phys
	cmpl	%a3@(4),%d0		| == kernel_crp.rp_addr ?
	jeq	Lsame_mmuctx		| skip loadcrp/flush
	/* OK, it is a new MMU context.  Load it up. */
	movl	%d0,%a3@(4)
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a3@,%crp		| load new user root pointer
Lsame_mmuctx:
#endif

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use %d0,%d1,%a0,%a1
	 */
	moveml	%a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	tstb	%a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lresfprest:
	frestore %a0@			| restore state
Lres_skip:
	movw	%a1@(PCB_PS),%d0	| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,%d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	%d0,%sr			| OK, restore PS
	movl	#1,%a0			| return 1 (for alternate returns)
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	%sp@(4),%a1
	movw	%sr,%a1@(PCB_PS)
	movl	%usp,%a0		| grab USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	fsave	%a0@			| save FP state
	tstb	%a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	movl	#0,%a0			| return 0
	rts

/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	%sp@(4),%a0
	pflush	#0,#0,%a0@		| flush address from both sides
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip data cache
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts

ENTRY(DCIS)
__DCIS:
	rts

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

ENTRY(PCIA)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
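					| (fc 5 = supervisor data space;
					| level 7 searches the full tree)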
	rts

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
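/*
 * Roughly, in C (a sketch; "sr" stands for the status register):
 *
 *	int _splraise(int new)
 *	{
 *		int old = sr;
 *		if ((old & PSL_HIGHIPL) < new)
 *			sr = new;
 *		return old;
 *	}
 */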

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
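/*
 * The loop below runs about N/delay_divisor iterations, and the
 * total delay is N/256 microseconds, so each iteration accounts for
 * delay_divisor/256 of a microsecond; e.g. a delay_divisor of 512
 * would make every iteration count as 2 microseconds.
 */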
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
	.align	8
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

| The end!