locore.s revision 1.29 1 /* $NetBSD: locore.s,v 1.29 1998/09/30 23:01:31 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 */
43
44 #include "opt_compat_netbsd.h"
45 #include "opt_uvm.h"
46
47 #include "assym.h"
48 #include <machine/asm.h>
49 #include <machine/trap.h>
50
| Remember this is a fun project!
52
53 .data
54 GLOBAL(mon_crp)
55 .long 0,0
56
57 | This is for kvm_mkdb, and should be the address of the beginning
58 | of the kernel text segment (not necessarily the same as kernbase).
59 .text
60 GLOBAL(kernel_text)
61
62 | This is the entry point, as well as the end of the temporary stack
63 | used during process switch (one 8K page ending at start)
64 ASGLOBAL(tmpstk)
65 ASGLOBAL(start)
66
67 | The first step, after disabling interrupts, is to map enough of the kernel
68 | into high virtual address space so that we can use position dependent code.
69 | This is a tricky task on the sun3x because the MMU is already enabled and
70 | the ROM monitor provides no indication of where the root MMU table is mapped.
71 | Therefore we must use one of the 68030's 'transparent translation' registers
72 | to define a range in the address space where the MMU translation is
73 | turned off. Once this is complete we can modify the MMU table directly
74 | without the need for it to be mapped into virtual memory.
75 | All code must be position independent until otherwise noted, as the
76 | boot loader has loaded us into low memory but all the symbols in this
77 | code have been linked high.
78 movw #PSL_HIGHIPL, sr | no interrupts
79 movl #KERNBASE, a5 | for vtop conversion
80 lea _C_LABEL(mon_crp), a0 | where to store the CRP
81 subl a5, a0
82 | Note: borrowing mon_crp for tt0 setup...
83 movl #0x3F8107, a0@ | map the low 1GB v=p with the
84 .long 0xf0100800 | transparent translation reg0
85 | [ pmove a0@, tt0 ]
86 | In order to map the kernel into high memory we will copy the root table
87 | entry which maps the 16 megabytes of memory starting at 0x0 into the
88 | entry which maps the 16 megabytes starting at KERNBASE.
89 pmove crp, a0@ | Get monitor CPU root pointer
90 movl a0@(4), a1 | 2nd word is PA of level A table
91
92 movl a1, a0 | compute the descriptor address
93 addl #0x3e0, a1 | for VA starting at KERNBASE
94 movl a0@, a1@ | copy descriptor type
95 movl a0@(4), a1@(4) | copy physical address
96
97 | Kernel is now double mapped at zero and KERNBASE.
98 | Force a long jump to the relocated code (high VA).
99 movl #IC_CLEAR, d0 | Flush the I-cache
100 movc d0, cacr
101 jmp L_high_code:l | long jump
102
103 L_high_code:
104 | We are now running in the correctly relocated kernel, so
105 | we are no longer restricted to position-independent code.
106 | It is handy to leave transparent translation enabled while
107 | for the low 1GB while _bootstrap() is doing its thing.
108
109 | Do bootstrap stuff needed before main() gets called.
110 | Our boot loader leaves a copy of the kernel's exec header
111 | just before the start of the kernel text segment, so the
112 | kernel can sanity-check the DDB symbols at [end...esym].
113 | Pass the struct exec at tmpstk-32 to _bootstrap().
114 | Also, make sure the initial frame pointer is zero so that
115 | the backtrace algorithm used by KGDB terminates nicely.
116 lea _ASM_LABEL(tmpstk)-32, sp
117 movl #0,a6
118 jsr _C_LABEL(_bootstrap) | See locore2.c
119
120 | Now turn off the transparent translation of the low 1GB.
121 | (this also flushes the ATC)
122 clrl sp@-
123 .long 0xf0170800 | pmove sp@,tt0
124 addql #4,sp
125
126 | Now that _bootstrap() is done using the PROM functions,
127 | we can safely set the sfc/dfc to something != FC_CONTROL
128 moveq #FC_USERD, d0 | make movs access "user data"
129 movc d0, sfc | space for copyin/copyout
130 movc d0, dfc
131
132 | Setup process zero user/kernel stacks.
133 movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
134 lea a1@(USPACE-4),sp | set SSP to last word
135 movl #USRSTACK-4,a2
136 movl a2,usp | init user SP
137
138 | Note curpcb was already set in _bootstrap().
139 | Will do fpu initialization during autoconfig (see fpu.c)
140 | The interrupt vector table and stack are now ready.
141 | Interrupts will be enabled later, AFTER autoconfiguration
142 | is finished, to avoid spurrious interrupts.
143
144 /*
145 * Final preparation for calling main.
146 *
147 * Create a fake exception frame that returns to user mode,
148 * and save its address in p->p_md.md_regs for cpu_fork().
149 * The new frames for process 1 and 2 will be adjusted by
150 * cpu_set_kpc() to arrange for a call to a kernel function
151 * before the new process does its rte out to user mode.
152 */
153 clrw sp@- | tf_format,tf_vector
154 clrl sp@- | tf_pc (filled in later)
155 movw #PSL_USER,sp@- | tf_sr for user mode
156 clrl sp@- | tf_stackadj
157 lea sp@(-64),sp | tf_regs[16]
158 movl sp,a1 | a1=trapframe
159 lea _C_LABEL(proc0),a0 | proc0.p_md.md_regs =
160 movl a1,a0@(P_MDREGS) | trapframe
161 movl a2,a1@(FR_SP) | a2 == usp (from above)
162 pea a1@ | push &trapframe
163 jbsr _C_LABEL(main) | main(&trapframe)
164 addql #4,sp | help DDB backtrace
165 trap #15 | should not get here
166
| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
| Loads the user SP from the frame, restores all registers
| except SSP, then does the rte back out to user mode.
GLOBAL(proc_do_uret)
	movl	sp@(FR_SP),a0		| grab and load
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,sp			| pop SSP and stack adjust count
	rte
175
/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8: switchframe from before cpu_set_kpc
 * SP+4: void *proc;
 * SP:   u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	sp@+,a0			| pop function pointer
	jbsr	a0@			| (*func)(procp)
	addql	#4,sp			| toss the arg
	rts				| as cpu_switch would do
196
197 | That is all the assembly startup code we need on the sun3x!
198 | The rest of this is like the hp300/locore.s where possible.
199
200 /*
201 * Trap/interrupt vector routines
202 */
203 #include <m68k/m68k/trap_subr.s>
204
| Bus error handler.  If a device probe (nofault set) is in progress,
| longjmp back out; otherwise fall into the common address/bus error
| fault decoding below.
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	lea	sp@(FR_HW),a1		| grab base of HW berr frame
	moveq	#0,d0
	movw	a1@(10),d0		| grab SSW for fault processing
	btst	#12,d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,d0			| yes, must set FB
	movw	d0,a1@(10)		|   for hardware too
LbeX0:
	btst	#13,d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,d0			| yes, must set FC
	movw	d0,a1@(10)		|   for hardware too
LbeX1:
	btst	#8,d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	a1@(16),d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	a1@(2),d1		| no, can use saved PC
	btst	#14,d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	a1@(36),d1		| long format, use stage B address
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,d1			| yes, adjust address
Lbe10:
	movl	d1,sp@-			| push fault VA
	movl	d0,sp@-			| and padded SSW
	movw	a1@(6),d0		| get frame format/vector offset
	andw	#0x0FFF,d0		| clear out frame format
	cmpw	#12,d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	d1,a0			| fault address
	movl	sp@,d0			| function code from ssw
	btst	#8,d0			| data fault?
	jne	Lbe10a
	movql	#1,d0			| user program access FC
					| (we dont separate data/program)
	btst	#5,a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,d0			| else supervisor program access
Lbe10a:
	ptestr	d0,a0@,#7		| do a table search
	pmove	psr,sp@			| save result
	movb	sp@,d1
	btst	#2,d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	sp@,d0			| ssw into low word of d0
	andw	#0xc0,d0		| write protect is set on page:
	cmpw	#0x40,d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,sp@-		| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
297
/*
 * FP exceptions.
 */
| Unimplemented FP instruction: hand off to the FP emulator via trap().
GLOBAL(fpfline)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

| Unsupported FP data type: hand off to the FP emulator via trap().
GLOBAL(fpunsupp)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| and save
	movl	a0,sp@(FR_SP)		|   the user stack pointer
	clrl	sp@-			| no VA arg
	movl	_C_LABEL(curpcb),a0	| current pcb
	lea	a0@(PCB_FPCTX),a0	| address of FP savearea
	fsave	a0@			| save state
	tstb	a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	d0			| no, need to tweak BIU
	movb	a0@(1),d0		| get frame size
	bset	#3,a0@(0,d0:w)		| set exc_pend bit of BIU
Lfptnull:
	fmovem	fpsr,sp@-		| push fpsr as code argument
	frestore a0@			| restore state
	movl	#T_FPERR,sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
338
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	sp@+,#0xFFFF		| restore regs
	addql	#4, sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
350
/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	movl	d0,sp@-			| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,sp			| pop syscall arg
	movl	sp@(FR_SP),a0		| grab and restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| restore most registers
	addql	#8,sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
367
/*
 * Trap 1 action depends on the emulation type:
 * NetBSD: sigreturn "syscall"
 * HPUX:   user breakpoint
 */
GLOBAL(trap1)
#if 0 /* COMPAT_HPUX */
	/* If process is HPUX, this is a user breakpoint. */
	jne	_C_LABEL(trap15)	| HPUX user breakpoint
#endif
	jra	_ASM_LABEL(sigreturn)	| NetBSD

/*
 * Trap 2 action depends on the emulation type:
 * NetBSD: user breakpoint -- See XXX below...
 * SunOS:  cache flush
 * HPUX:   sigreturn
 */
GLOBAL(trap2)
#if 0 /* COMPAT_HPUX */
	/* If process is HPUX, this is a sigreturn call */
	jne	_ASM_LABEL(sigreturn)
#endif
	jra	_C_LABEL(trap15)	| NetBSD user breakpoint
| XXX - Make NetBSD use trap 15 for breakpoints?
| XXX - That way, we can allow this cache flush...
| XXX SunOS trap #2 (and NetBSD?)
| Flush on-chip cache (leave it enabled)
|	movl	#CACHE_CLR,d0
|	movc	d0,cacr
|	rte
399
/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	d1,sp@-			| push length
	movl	a1,sp@-			| push addr
	movl	d0,sp@-			| push command
	jbsr	_C_LABEL(cachectl)	| do it
	lea	sp@(12),sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
412
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-
	moveq	#T_TRACE,d0
	btst	#5,sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-
	moveq	#T_TRAP15,d0
	btst	#5,sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
439
ASLOCAL(kbrkpt)
| Kernel-mode breakpoint or trace trap. (d0=trap_type)
| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	lea	sp@(FR_SIZE),a6		| Save stack pointer
	movl	a6,sp@(FR_SP)		|   from before trap

| If we are not on tmpstk switch to it.
| (so debugger can change the stack pointer)
	movl	a6,d1
	cmpl	#_ASM_LABEL(tmpstk),d1
	jls	Lbrkpt2			| already on tmpstk
| Copy frame to the temporary stack
	movl	sp,a0			| a0=src
	lea	_ASM_LABEL(tmpstk)-96,a1 | a1=dst
	movl	a1,sp			| sp=new frame
	moveq	#FR_SIZE,d1
Lbrkpt1:
	movl	a0@+,a1@+		| copy FR_SIZE bytes, one long
	subql	#4,d1			|   at a time
	bgt	Lbrkpt1

Lbrkpt2:
| Call the trap handler for the kernel debugger.
| Do not call trap() to handle it, so that we can
| set breakpoints in trap() if we want.  We know
| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	d0,sp@-			| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,sp			| pop args

| The stack pointer may have been modified, or
| data below it modified (by kgdb push call),
| so push the hardware frame at the current sp
| before restoring registers and returning.
	movl	sp@(FR_SP),a0		| modified sp
	lea	sp@(FR_SIZE),a1		| end of our frame
	movl	a1@-,a0@-		| copy 2 longs with
	movl	a1@-,a0@-		| ... predecrement
	movl	a0,sp@(FR_SP)		| sp = h/w frame
	moveml	sp@+,#0x7FFF		| restore all but sp
	movl	sp@,sp			| ... and sp
	rte				| all done
483
484 /* Use common m68k sigreturn */
485 #include <m68k/m68k/sigreturn.s>
486
487 /*
488 * Interrupt handlers. Most are auto-vectored,
489 * and hard-wired the same way on all sun3 models.
490 * Format in the stack is:
491 * d0,d1,a0,a1, sr, pc, vo
492 */
493
| Save/restore only the volatile registers (d0,d1,a0,a1) around an
| interrupt handler call; the handlers preserve the rest themselves.
#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,sp@-

#define INTERRUPT_RESTORE \
	moveml	sp@+,#0x0303

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
	.align	2
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
	.align	2
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
	.align	2
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE
530
/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"		| 0
	.asciz	"lev1"		| 1
	.asciz	"lev2"		| 2
	.asciz	"lev3"		| 3
	.asciz	"lev4"		| 4
	.asciz	"clock"		| 5
	.asciz	"lev6"		| 6
	.asciz	"nmi"		| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
549
550 /*
551 * Emulation of VAX REI instruction.
552 *
553 * This code is (mostly) un-altered from the hp300 code,
554 * except that sun machines do not need a simulated SIR
555 * because they have a real software interrupt register.
556 *
557 * This code deals with checking for and servicing ASTs
558 * (profiling, scheduling) and software interrupts (network, softclock).
559 * We check for ASTs first, just like the VAX. To avoid excess overhead
560 * the T_ASTFLT handling code will also check for software interrupts so we
561 * do not have to do it here. After identifying that we need an AST we
562 * drop the IPL to allow device interrupts.
563 *
564 * This code is complicated by the fact that sendsig may have been called
565 * necessitating a stack cleanup.
566 */
567
| Common exception return path ("rei", after the VAX instruction).
| Checks for a pending AST when returning to user mode and, if one is
| pending, builds a full trap frame and calls trap(T_ASTFLT).
ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,sr		| lower SPL
	clrl	sp@-			| stack adjust
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(FR_SP)		|    the users SP
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_ASTFLT,sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(FR_SP),a0		| restore user SP
	movl	a0,usp			|   from save area
	movw	sp@(FR_ADJ),d0		| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	sp@(FR_HW),a1		| pointer to HW frame
	addql	#8,a1			| source pointer
	movl	a1,a0			| source
	addw	d0,a0			|  + hole size = dest pointer
	movl	a1@-,a0@-		| copy
	movl	a1@-,a0@-		|  8 bytes
	movl	a0,sp@(FR_SP)		| new SSP
	moveml	sp@+,#0x7FFF		| restore user registers
	movl	sp@,sp			| and our SP
Ldorte:
	rte				| real return
607
608 /*
609 * Initialization is at the beginning of this file, because the
610 * kernel entry point needs to be at zero for compatibility with
611 * the Sun boot loader. This works on Sun machines because the
612 * interrupt vector table for reset is NOT at address zero.
613 * (The MMU has a "boot" bit that forces access to the PROM)
614 */
615
616 /*
617 * Use common m68k sigcode.
618 */
619 #include <m68k/m68k/sigcode.s>
620
621 .text
622
623 /*
624 * Primitives
625 */
626
627 /*
628 * Use common m68k support routines.
629 */
630 #include <m68k/m68k/support.s>
631
BSS(want_resched,4)		| nonzero when a reschedule is wanted
633
634 /*
635 * Use common m68k process manipulation routines.
636 */
637 #include <m68k/m68k/proc_subr.s>
638
| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)	| garbage pcb used by switch_exit
	.text
650
651 /*
652 * At exit of a process, do a cpu_switch for the last time.
653 * Switch to a safe stack and PCB, and select a new process to run. The
654 * old stack and u-area will be freed by the reaper.
655 */
ENTRY(switch_exit)
	movl	sp@(4),a0		| struct proc *p
| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),sp	| goto a tmp stack

/* Schedule the vmspace and stack to be freed. */
	movl	a0,sp@-			| exit2(p)
	jbsr	_C_LABEL(exit2)

/* Don't pop the proc; pass it to cpu_switch(). */

	jra	_C_LABEL(cpu_switch)
669
670 /*
671 * When no processes are on the runq, cpu_switch() branches to idle
672 * to wait for something to come ready.
673 */
674 .data
675 GLOBAL(Idle_count)
676 .long 0
677 .text
678
679 Lidle:
680 stop #PSL_LOWIPL
681 GLOBAL(_Idle) | See clock.c
682 movw #PSL_HIGHIPL,sr
683 addql #1, _C_LABEL(Idle_count)
684 tstl _C_LABEL(whichqs)
685 jeq Lidle
686 movw #PSL_LOWIPL,sr
687 jra Lsw1
688
689 Lbadsw:
690 movl #Lsw0,sp@-
691 jbsr _C_LABEL(panic)
692 /*NOTREACHED*/
693
/*
 * cpu_switch()
 * Hacked for sun3
 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
 * XXX - Should we use p->p_addr instead of curpcb? -gwr
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),a1	| current pcb
	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

Lsw1:
/*
 * Find the highest-priority queue that isn't empty,
 * then take the first proc from that queue.
 */
	clrl	d0
	lea	_C_LABEL(whichqs),a0
	movl	a0@,d1
Lswchk:
	btst	d0,d1
	jne	Lswfnd
	addqb	#1,d0
	cmpb	#32,d0
	jne	Lswchk
	jra	_C_LABEL(_Idle)
Lswfnd:
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	movl	a0@,d1			| and check again...
	bclr	d0,d1
	jeq	Lsw1			| proc moved, rescan
	movl	d1,a0@			| update whichqs
	moveq	#1,d1			| double check for higher priority
	lsll	d0,d1			| process (which may have snuck in
	subql	#1,d1			| while we were finding this one)
	andl	a0@,d1
	jeq	Lswok			| no one got in, continue
	movl	a0@,d1
	bset	d0,d1			| otherwise put this one back
	movl	d1,a0@
	jra	Lsw1			| and rescan
Lswok:
	movl	d0,d1
	lslb	#3,d1			| convert queue number to index
	addl	#_qs,d1			| locate queue (q)
	movl	d1,a1
	cmpl	a1@(P_FORW),a1		| anyone on queue?
	jeq	Lbadsw			| no, panic
	movl	a1@(P_FORW),a0		| p = q->p_forw
	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	a0@(P_FORW),a1		| q = p->p_forw
	movl	a0@(P_BACK),a1@(P_BACK)	| q->p_back = p->p_back
	cmpl	a0@(P_FORW),d1		| anyone left on queue?
	jeq	Lsw2			| no, skip
	movl	_C_LABEL(whichqs),d1
	bset	d0,d1			| yes, reset bit
	movl	d1,_C_LABEL(whichqs)
Lsw2:
	movl	a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	sp@+,a1			| XXX - Make this work!
	cmpl	a0,a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
/*
 * Save state of previous process in its pcb.
 */
	movl	_C_LABEL(curpcb),a1
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
	movl	usp,a2			| grab USP (a2 has been saved)
	movl	a2,a1@(PCB_USP)		| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
	fsave	a2@			| save FP state
	tstb	a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

/*
 * Now that we have saved all the registers that must be
 * preserved, we are free to use those registers until
 * we load the registers for the switched-to process.
 * In this section, keep:  a0=curproc, a1=curpcb
 */

#ifdef DIAGNOSTIC
	tstl	a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,a0@(P_STAT)
	jne	Lbadsw
#endif
	clrl	a0@(P_BACK)		| clear back link
	movl	a0@(P_ADDR),a1		| get p_addr
	movl	a1,_C_LABEL(curpcb)

/*
 * Load the new VM context (new MMU root pointer)
 */
	movl	a0@(P_VMSPACE),a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#ifdef	PMAP_DEBUG
	/* When debugging just call _pmap_switch(). */
	movl	a2@(VM_PMAP),a2		| pmap = vm->vm_map.pmap
	pea	a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,sp
	movl	_C_LABEL(curpcb),a1	| restore p_addr
#else
	/* Otherwise, use this inline version. */
	lea	_C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
	movl	a2@(VM_PMAP),a2		| pmap = vm->vm_map.pmap
	movl	a2@(PM_A_PHYS),d0	| phys = pmap->pm_a_phys
	cmpl	a3@(4),d0		|  == kernel_crp.rp_addr ?
	jeq	Lsame_mmuctx		| skip loadcrp/flush
	/* OK, it is a new MMU context.  Load it up. */
	movl	d0,a3@(4)
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a3@,crp			| load new user root pointer
Lsame_mmuctx:
#endif

/*
 * Reload the registers for the new process.
 * After this point we can only use d0,d1,a0,a1
 */
	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	a1@(PCB_USP),a0
	movl	a0,usp			| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	tstb	a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lresfprest:
	frestore a0@			| restore state
Lres_skip:
	movw	a1@(PCB_PS),d0		| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	d0,sr			| OK, restore PS
	moveq	#1,d0			| return 1 (for alternate returns)
	rts
854
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	sp@(4),a1		| a1 = &pcb
	movw	sr,a1@(PCB_PS)
	movl	usp,a0			| grab USP
	movl	a0,a1@(PCB_USP)		| and save it
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	fsave	a0@			| save FP state
	tstb	a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	moveq	#0,d0			| return 0
	rts
877
878 /* suline() */
879
#ifdef DEBUG
	.data
| When nonzero, the TBI* routines below flush the entire TLB
| (and fullcflush is a matching hook for cache flushes).
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif
888
/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha				| flush entire TLB
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
898
/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	sp@(4),a0		| a0 = VA to invalidate
	pflush	#0,#0,a0@		| flush address from both sides
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip data cache
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
938
/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,d0
	movc	d0,cacr			| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts				| no-op (see NOTE above)

ENTRY(DCIS)
__DCIS:
	rts				| no-op (see NOTE above)

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

ENTRY(PCIA)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts				| no external cache on sun3x

ENTRY(ecacheoff)
	rts				| no external cache on sun3x
982
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	sp,d0			| get current SP
	addql	#4,d0			| compensate for return address
	rts

ENTRY(getsfc)
	movc	sfc,d0			| return source function code
	rts

ENTRY(getdfc)
	movc	dfc,d0			| return destination function code
	rts

ENTRY(getvbr)
	movc	vbr, d0			| return vector base register
	rts

ENTRY(setvbr)
	movl	sp@(4), d0
	movc	d0, vbr			| set vector base register
	rts
1012
/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	sp@(4),a0		| arg1: &CRP
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a0@,crp			| load new user root pointer
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	sp@(4),a0		| a0 = VA
	ptestr	#5,a0@,#7,a1		| a1 = addr of PTE
	movl	a1,d0			| return PTE address in d0
	rts
1033
/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	d0
	movw	sr,d0			| return current SR
	rts

ENTRY(_spl)
	clrl	d0
	movw	sr,d0			| return previous SR
	movl	sp@(4),d1
	movw	d1,sr			| set new SR unconditionally
	rts

ENTRY(_splraise)
	clrl	d0
	movw	sr,d0			| d0 = old SR (return value)
	movl	d0,d1
	andl	#PSL_HIGHIPL,d1		| old &= PSL_HIGHIPL
	cmpl	sp@(4),d1		| (old - new)
	bge	Lsplr			| only raise, never lower
	movl	sp@(4),d1
	movw	d1,sr
Lsplr:
	rts
1064
/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	sp@(4),a0		| save area pointer
	fsave	a0@			| save state
	tstb	a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	sp@(4),a0		| save area pointer
	tstb	a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lm68881rdone:
	frestore a0@			| restore state
	rts
1087
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| d0 = arg = (usecs << 8)
	movl	sp@(4),d0
	| d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),d1
L_delay:
	subl	d1,d0			| spin until d0 goes non-positive
	jgt	L_delay
	rts
1105
1106
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND
1115
| The end!
1117