/*	$NetBSD: locore.s,v 1.49 2002/10/20 02:37:38 chs Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: locore.s 1.66 92/12/22$
 * @(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)
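| (Note that tmpstk and start label the same address: the temporary
| stack grows down from here, so per the comment above it occupies the
| 8K page just below the kernel's entry point.)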

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
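| (As I read the 68030 TTx register format, 0x3F8107 breaks down as:
| logical base 0x00 with address mask 0x3F, matching VA 0x00000000 to
| 0x3FFFFFFF; bit 15 enables the register; the R/W mask and FC mask of 7
| match reads and writes in all function code spaces.  The raw .long is
| the pmove opcode, emitted by hand for assemblers without tt0 support.)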
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address
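| (Each level A descriptor is 8 bytes, the type word copied first and the
| physical address second, so 0x3e0 is simply KERNBASE's entry index in
| the root table times 8.)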

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for the low 1GB
| while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	%sp@,%tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
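| A sketch of the frame built below, from high addresses down
| (first push first), matching the sizes actually pushed:
|	tf_format/tf_vector	(word)
|	tf_pc			(long, filled in later)
|	tf_sr			(word)
|	tf_stackadj		(long: adjust count plus pad)
|	tf_regs[16]		(64 bytes; %sp ends up pointing here)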
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%sp,%a1			| %a1=trapframe
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|  trapframe
	movl	%a2,%a1@(FR_SP)		|  a2 == usp (from above)
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,%sp			| pop &trapframe
	PANIC("main() returned")

/*
 * proc_trampoline: call function in register %a2 with %a3 as an arg
 * and then rei.
 */
GLOBAL(proc_trampoline)
	movl	%a3,%sp@-		| push function arg
	jbsr	%a2@			| call function
	addql	#4,%sp			| pop arg
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	jra	_ASM_LABEL(rei)		| and return

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	| longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
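| (SSW bits used below, as the per-line comments name them: 12=RB and
| 13=RC are the rerun flags for the instruction-pipe stages, 14=FB and
| 15=FC the matching fault flags that make the RTE refetch, and 8 marks
| a data fault.)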
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
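| (Plan: run a ptestr table search on the fault address and inspect the
| MMU status register; an invalid descriptor or a write-protect hit means
| an MMU fault for trap(), anything else is treated as a real bus error.)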
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		| the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup

/*
 * Other exceptions only cause four and six word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
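| (Push order matters below: curproc goes deepest and command is pushed
| last, so the C routine is invoked as cachectl1(command, addr, length,
| curproc).)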
GLOBAL(trap12)
	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

| Check PSW and see what happened.
| T=0 S=0	(should not happen)
| T=1 S=0	trace trap from user mode
| T=0 S=1	trace trap on a trap instruction
| T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		| from before trap

| If we are not on tmpstk, switch to it
| (so the debugger can change the stack pointer).
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
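
| (The moveml register mask is bit-reversed for the predecrement form,
| so #0xC0C0 pushes %d0-%d1/%a0-%a1 and #0x0303 pops the same four
| registers: just the scratch set a C interrupt handler may clobber.)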

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called,
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		| the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		| from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
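
| (Laststkadj: sendsig grew the exception frame, so the code below slides
| the 8-byte hardware portion, sr plus pc plus format/vector, up by the
| adjust amount and points sp at the relocated frame before the rte.)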
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			| + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		| 8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)
	.text

/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 *
 * MUST BE CALLED AT SPLHIGH!
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)

/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * cpu_switch()
 * Hacked for sun3
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movw	%sr,%a1@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	movl	%d0,%d1
	negl	%d0
	andl	%d1,%d0
	bfffo	%d0{#0:#32},%d1
	eorib	#31,%d1
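
| (%d0 & -%d0 isolates the lowest set bit of sched_whichqs; bfffo then
| yields its offset from the MSB, and eor-ing with 31 converts that to
| the bit number itself, i.e. the highest-priority non-empty run queue.)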

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1		| XXX - Make this work!
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
	movl	%usp,%a2		| grab USP (a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
	fsave	%a2@			| save FP state
	tstb	%a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	%fp0-%fp7,%a2@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  %a0=curproc, %a1=curpcb
	 */

	clrl	%a0@(P_BACK)		| clear back link
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	%a0@(P_VMSPACE),%a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	%a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#ifdef PMAP_DEBUG
	/* When debugging just call _pmap_switch(). */
	movl	%a2@(VM_PMAP),%a2	| pmap = vm->vm_map.pmap
	pea	%a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr
#else
	/* Otherwise, use this inline version. */
	lea	_C_LABEL(kernel_crp),%a3 | our CPU Root Ptr. (CRP)
	movl	%a2@(VM_PMAP),%a2	| pmap = vm->vm_map.pmap
	movl	%a2@(PM_A_PHYS),%d0	| phys = pmap->pm_a_phys
	cmpl	%a3@(4),%d0		| == kernel_crp.rp_addr ?
	jeq	Lsame_mmuctx		| skip loadcrp/flush
	/* OK, it is a new MMU context.  Load it up. */
	movl	%d0,%a3@(4)
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a3@,%crp		| load new user root pointer
Lsame_mmuctx:
#endif

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use %d0,%d1,%a0,%a1
	 */
	moveml	%a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	tstb	%a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lresfprest:
	frestore %a0@			| restore state
Lres_skip:
	movw	%a1@(PCB_PS),%d0	| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,%d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	%d0,%sr			| OK, restore PS
	movl	#1,%d0			| return 1 (for alternate returns)
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	%sp@(4),%a1
	movw	%sr,%a1@(PCB_PS)
	movl	%usp,%a0		| grab USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	fsave	%a0@			| save FP state
	tstb	%a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	movl	#0,%d0			| return 0
	rts

/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	%sp@(4),%a0
	pflush	#0,#0,%a0@		| flush address from both sides
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip data cache
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts

ENTRY(DCIS)
__DCIS:
	rts

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

ENTRY(PCIA)
	movl	#DC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get the caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee-saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

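| (Usage sketch from C, via the inlines mentioned above:
|	s = _spl(PSL_HIGHIPL);  ...critical section...;  _spl(s);
| Each variant below returns the previous SR in %d0 and, except for
| _getsr, installs its argument.)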
ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
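| (Worked example: _delay(256000) requests at least 256000/256 = 1000
| usec.  The loop below runs roughly N/delay_divisor iterations, so
| delay_divisor should be tuned so that one subl/jgt pair takes about
| delay_divisor/256 usec on the machine at hand.)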
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

| The end!