locore.s revision 1.27 1 /* $NetBSD: locore.s,v 1.27 1998/06/09 20:47:17 gwr Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 */
43
44 #include "opt_uvm.h"
45
46 #include "assym.h"
47 #include <machine/asm.h>
48 #include <machine/trap.h>
49
50 | Note: '|' is the line-comment character for this (MIT-syntax) assembler.
51
52 .data
| Saved copy of the PROM monitor's CPU Root Pointer (CRP):
| two longwords (limit/format word, then level-A table address).
| Note: also borrowed briefly at startup as scratch for tt0 setup.
53 GLOBAL(mon_crp)
54 .long 0,0
55
56 | This is for kvm_mkdb, and should be the address of the beginning
57 | of the kernel text segment (not necessarily the same as kernbase).
58 .text
59 GLOBAL(kernel_text)
60
61 | This is the entry point, as well as the end of the temporary stack
62 | used during process switch (one 8K page ending at start)
| Note: tmpstk and start share the same address; the temporary stack
| grows downward from here, below the kernel entry point.
63 ASGLOBAL(tmpstk)
64 ASGLOBAL(start)
65
66 | The first step, after disabling interrupts, is to map enough of the kernel
67 | into high virtual address space so that we can use position dependent code.
68 | This is a tricky task on the sun3x because the MMU is already enabled and
69 | the ROM monitor provides no indication of where the root MMU table is mapped.
70 | Therefore we must use one of the 68030's 'transparent translation' registers
71 | to define a range in the address space where the MMU translation is
72 | turned off. Once this is complete we can modify the MMU table directly
73 | without the need for it to be mapped into virtual memory.
74 | All code must be position independent until otherwise noted, as the
75 | boot loader has loaded us into low memory but all the symbols in this
76 | code have been linked high.
77 movw #PSL_HIGHIPL, sr | no interrupts
78 movl #KERNBASE, a5 | for vtop conversion
79 lea _C_LABEL(mon_crp), a0 | where to store the CRP
| Subtract KERNBASE to convert the linked (high) address of mon_crp
| to the physical (low) address we are actually running at.
80 subl a5, a0
81 | Note: borrowing mon_crp for tt0 setup...
82 movl #0x3F8107, a0@ | map the low 1GB v=p with the
83 .long 0xf0100800 | transparent translation reg0
84 | [ pmove a0@, tt0 ]
| (Hand-assembled opcode above because the assembler used may not
| know the 68030 tt0 form of pmove.)
85 | In order to map the kernel into high memory we will copy the root table
86 | entry which maps the 16 megabytes of memory starting at 0x0 into the
87 | entry which maps the 16 megabytes starting at KERNBASE.
88 pmove crp, a0@ | Get monitor CPU root pointer
89 movl a0@(4), a1 | 2nd word is PA of level A table
90
91 movl a1, a0 | compute the descriptor address
92 addl #0x3e0, a1 | for VA starting at KERNBASE
93 movl a0@, a1@ | copy descriptor type
94 movl a0@(4), a1@(4) | copy physical address
95
96 | Kernel is now double mapped at zero and KERNBASE.
97 | Force a long jump to the relocated code (high VA).
98 movl #IC_CLEAR, d0 | Flush the I-cache
99 movc d0, cacr
100 jmp L_high_code:l | long jump
101
102 L_high_code:
103 | We are now running in the correctly relocated kernel, so
104 | we are no longer restricted to position-independent code.
105 | It is handy to leave transparent translation enabled
106 | for the low 1GB while _bootstrap() is doing its thing.
107
108 | Do bootstrap stuff needed before main() gets called.
109 | Our boot loader leaves a copy of the kernel's exec header
110 | just before the start of the kernel text segment, so the
111 | kernel can sanity-check the DDB symbols at [end...esym].
112 | Pass the struct exec at tmpstk-32 to _bootstrap().
113 | Also, make sure the initial frame pointer is zero so that
114 | the backtrace algorithm used by KGDB terminates nicely.
115 lea _ASM_LABEL(tmpstk)-32, sp
116 movl #0,a6
117 jsr _C_LABEL(_bootstrap) | See locore2.c
118
119 | Now turn off the transparent translation of the low 1GB.
120 | (this also flushes the ATC)
121 clrl sp@-
122 .long 0xf0170800 | pmove sp@,tt0
123 addql #4,sp
124
125 | Now that _bootstrap() is done using the PROM functions,
126 | we can safely set the sfc/dfc to something != FC_CONTROL
127 moveq #FC_USERD, d0 | make movs access "user data"
128 movc d0, sfc | space for copyin/copyout
129 movc d0, dfc
130
131 | Setup process zero user/kernel stacks.
132 movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
133 lea a1@(USPACE-4),sp | set SSP to last word
134 movl #USRSTACK-4,a2
135 movl a2,usp | init user SP
136
137 | Note curpcb was already set in _bootstrap().
138 | Will do fpu initialization during autoconfig (see fpu.c)
139 | The interrupt vector table and stack are now ready.
140 | Interrupts will be enabled later, AFTER autoconfiguration
141 | is finished, to avoid spurious interrupts.
142
143 /*
144 * Final preparation for calling main.
145 *
146 * Create a fake exception frame that returns to user mode,
147 * and save its address in p->p_md.md_regs for cpu_fork().
148 * The new frames for process 1 and 2 will be adjusted by
149 * cpu_set_kpc() to arrange for a call to a kernel function
150 * before the new process does its rte out to user mode.
151 */
152 clrw sp@- | tf_format,tf_vector
153 clrl sp@- | tf_pc (filled in later)
154 movw #PSL_USER,sp@- | tf_sr for user mode
155 clrl sp@- | tf_stackadj
156 lea sp@(-64),sp | tf_regs[16]
157 movl sp,a1 | a1=trapframe
158 lea _C_LABEL(proc0),a0 | proc0.p_md.md_regs =
159 movl a1,a0@(P_MDREGS) | trapframe
160 movl a2,a1@(FR_SP) | a2 == usp (from above)
161 pea a1@ | push &trapframe
162 jbsr _C_LABEL(main) | main(&trapframe)
163 addql #4,sp | help DDB backtrace
164 trap #15 | should not get here
165
166 | This is used by cpu_fork() to return to user mode.
167 | It is called with SP pointing to a struct trapframe.
| Restores the user stack pointer and registers from the
| trapframe, then does an rte out to user mode.
168 GLOBAL(proc_do_uret)
169 movl sp@(FR_SP),a0 | grab and load
170 movl a0,usp | user SP
171 moveml sp@+,#0x7FFF | load most registers (all but SSP)
172 addql #8,sp | pop SSP and stack adjust count
173 rte
174
175 /*
176 * proc_trampoline:
177 * This is used by cpu_set_kpc() to "push" a function call onto the
178 * kernel stack of some process, very much like a signal delivery.
179 * When we get here, the stack has:
180 *
181 * SP+8: switchframe from before cpu_set_kpc
182 * SP+4: void *proc;
183 * SP: u_long func;
184 *
185 * On entry, the switchframe pushed by cpu_set_kpc has already been
186 * popped off the stack, so all this needs to do is pop the function
187 * pointer into a register, call it, then pop the arg, and finally
188 * return using the switchframe that remains on the stack.
189 */
190 GLOBAL(proc_trampoline)
191 movl sp@+,a0 | function pointer
192 jbsr a0@ | (*func)(procp)
193 addql #4,sp | toss the arg
194 rts | as cpu_switch would do
195
196 | That is all the assembly startup code we need on the sun3x!
197 | The rest of this is like the hp300/locore.s where possible.
198
199 /*
200 * Trap/interrupt vector routines
201 */
202 #include <m68k/m68k/trap_subr.s>
203
| Bus error handler. If a device probe is in progress (nofault set),
| longjmp back out; otherwise fall into the common address/bus error
| fault decoding below, which classifies the fault for trap().
204 GLOBAL(buserr)
205 tstl _C_LABEL(nofault) | device probe?
206 jeq _C_LABEL(addrerr) | no, handle as usual
207 movl _C_LABEL(nofault),sp@- | yes,
208 jbsr _C_LABEL(longjmp) | longjmp(nofault)
209 GLOBAL(addrerr)
210 clrl sp@- | stack adjust count
211 moveml #0xFFFF,sp@- | save user registers
212 movl usp,a0 | save the user SP
213 movl a0,sp@(FR_SP) | in the savearea
214 lea sp@(FR_HW),a1 | grab base of HW berr frame
215 moveq #0,d0
216 movw a1@(10),d0 | grab SSW for fault processing
217 btst #12,d0 | RB set?
218 jeq LbeX0 | no, test RC
219 bset #14,d0 | yes, must set FB
220 movw d0,a1@(10) | for hardware too
221 LbeX0:
222 btst #13,d0 | RC set?
223 jeq LbeX1 | no, skip
224 bset #15,d0 | yes, must set FC
225 movw d0,a1@(10) | for hardware too
226 LbeX1:
227 btst #8,d0 | data fault?
228 jeq Lbe0 | no, check for hard cases
229 movl a1@(16),d1 | fault address is as given in frame
230 jra Lbe10 | that's it
231 Lbe0:
| Instruction fault: reconstruct the fault address from the saved
| PC, adjusting for the pipeline stage (FB/FC) that faulted.
232 btst #4,a1@(6) | long (type B) stack frame?
233 jne Lbe4 | yes, go handle
234 movl a1@(2),d1 | no, can use save PC
235 btst #14,d0 | FB set?
236 jeq Lbe3 | no, try FC
237 addql #4,d1 | yes, adjust address
238 jra Lbe10 | done
239 Lbe3:
240 btst #15,d0 | FC set?
241 jeq Lbe10 | no, done
242 addql #2,d1 | yes, adjust address
243 jra Lbe10 | done
244 Lbe4:
245 movl a1@(36),d1 | long format, use stage B address
246 btst #15,d0 | FC set?
247 jeq Lbe10 | no, all done
248 subql #2,d1 | yes, adjust address
249 Lbe10:
250 movl d1,sp@- | push fault VA
251 movl d0,sp@- | and padded SSW
252 movw a1@(6),d0 | get frame format/vector offset
253 andw #0x0FFF,d0 | clear out frame format
254 cmpw #12,d0 | address error vector?
255 jeq Lisaerr | yes, go to it
256
257 /* MMU-specific code to determine reason for bus error. */
258 movl d1,a0 | fault address
259 movl sp@,d0 | function code from ssw
260 btst #8,d0 | data fault?
261 jne Lbe10a
262 movql #1,d0 | user program access FC
263 | (we dont separate data/program)
264 btst #5,a1@ | supervisor mode?
265 jeq Lbe10a | if no, done
266 movql #5,d0 | else supervisor program access
267 Lbe10a:
268 ptestr d0,a0@,#7 | do a table search
269 pmove psr,sp@ | save result
270 movb sp@,d1
271 btst #2,d1 | invalid? (incl. limit viol and berr)
272 jeq Lmightnotbemerr | no -> wp check
273 btst #7,d1 | is it MMU table berr?
274 jeq Lismerr | no, must be fast
275 jra Lisberr1 | real bus err needs not be fast
276 Lmightnotbemerr:
277 btst #3,d1 | write protect bit set?
278 jeq Lisberr1 | no, must be bus error
279 movl sp@,d0 | ssw into low word of d0
280 andw #0xc0,d0 | write protect is set on page:
281 cmpw #0x40,d0 | was it read cycle?
282 jeq Lisberr1 | yes, was not WPE, must be bus err
283 /* End of MMU-specific bus error code. */
284
| Dispatch to trap() with the classified trap type.
285 Lismerr:
286 movl #T_MMUFLT,sp@- | show that we are an MMU fault
287 jra _ASM_LABEL(faultstkadj) | and deal with it
288 Lisaerr:
289 movl #T_ADDRERR,sp@- | mark address error
290 jra _ASM_LABEL(faultstkadj) | and deal with it
291 Lisberr1:
292 clrw sp@ | re-clear pad word
293 Lisberr:
294 movl #T_BUSERR,sp@- | mark bus error
295 jra _ASM_LABEL(faultstkadj) | and deal with it
296
297 /*
298 * FP exceptions.
| fpfline: unimplemented FP instruction (F-line) trap.
| fpunsupp: unsupported FP data type trap.
| Both save the frame and hand off to the common fault path
| with an FP-emulation trap type.
299 */
300 GLOBAL(fpfline)
301 clrl sp@- | stack adjust count
302 moveml #0xFFFF,sp@- | save registers
303 moveq #T_FPEMULI,d0 | denote as FP emulation trap
304 jra _ASM_LABEL(fault) | do it
305
306 GLOBAL(fpunsupp)
307 clrl sp@- | stack adjust count
308 moveml #0xFFFF,sp@- | save registers
309 moveq #T_FPEMULD,d0 | denote as FP emulation trap
310 jra _ASM_LABEL(fault) | do it
311
312 /*
313 * Handles all other FP coprocessor exceptions.
314 * Note that since some FP exceptions generate mid-instruction frames
315 * and may cause signal delivery, we need to test for stack adjustment
316 * after the trap call.
317 */
318 GLOBAL(fpfault)
319 clrl sp@- | stack adjust count
320 moveml #0xFFFF,sp@- | save user registers
321 movl usp,a0 | and save
322 movl a0,sp@(FR_SP) | the user stack pointer
323 clrl sp@- | no VA arg
324 movl _C_LABEL(curpcb),a0 | current pcb
325 lea a0@(PCB_FPCTX),a0 | address of FP savearea
326 fsave a0@ | save state
327 tstb a0@ | null state frame?
328 jeq Lfptnull | yes, safe
329 clrw d0 | no, need to tweak BIU
330 movb a0@(1),d0 | get frame size
331 bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
332 Lfptnull:
333 fmovem fpsr,sp@- | push fpsr as code argument
334 frestore a0@ | restore state
335 movl #T_FPERR,sp@- | push type arg
336 jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
337
338 /*
339 * Other exceptions only cause four and six word stack frame and require
340 * no post-trap stack adjustment.
| Stray/unexpected traps land here; straytrap() just reports them
| and we return via the common rei path.
341 */
342 GLOBAL(badtrap)
343 clrl sp@- | stack adjust count
344 moveml #0xFFFF,sp@- | save std frame regs
345 jbsr _C_LABEL(straytrap) | report
346 moveml sp@+,#0xFFFF | restore regs
347 addql #4, sp | stack adjust count
348 jra _ASM_LABEL(rei) | all done
349
350 /*
351 * Trap 0 is for system calls
| Saves the full frame, passes the syscall number (in d0) to
| syscall(), then restores user state and returns via rei.
352 */
353 GLOBAL(trap0)
354 clrl sp@- | stack adjust count
355 moveml #0xFFFF,sp@- | save user registers
356 movl usp,a0 | save the user SP
357 movl a0,sp@(FR_SP) | in the savearea
358 movl d0,sp@- | push syscall number
359 jbsr _C_LABEL(syscall) | handle it
360 addql #4,sp | pop syscall arg
361 movl sp@(FR_SP),a0 | grab and restore
362 movl a0,usp | user SP
363 moveml sp@+,#0x7FFF | restore most registers
364 addql #8,sp | pop SP and stack adjust
365 jra _ASM_LABEL(rei) | all done
366
367 /*
368 * Trap 1 action depends on the emulation type:
369 * NetBSD: sigreturn "syscall"
370 * HPUX: user breakpoint
371 */
372 GLOBAL(trap1)
373 #if 0 /* COMPAT_HPUX */
374 /* If process is HPUX, this is a user breakpoint. */
375 jne _C_LABEL(trap15) | HPUX user breakpoint
376 #endif
377 jra _ASM_LABEL(sigreturn) | NetBSD
378
379 /*
380 * Trap 2 action depends on the emulation type:
381 * NetBSD: user breakpoint -- See XXX below...
382 * SunOS: cache flush
383 * HPUX: sigreturn
384 */
385 GLOBAL(trap2)
386 #if 0 /* COMPAT_HPUX */
387 /* If process is HPUX, this is a sigreturn call */
388 jne _ASM_LABEL(sigreturn)
389 #endif
390 jra _C_LABEL(trap15) | NetBSD user breakpoint
391 | XXX - Make NetBSD use trap 15 for breakpoints?
392 | XXX - That way, we can allow this cache flush...
393 | XXX SunOS trap #2 (and NetBSD?)
394 | Flush on-chip cache (leave it enabled)
| The three commented-out instructions below are the SunOS-style
| cache-flush implementation, kept for reference.
395 | movl #CACHE_CLR,d0
396 | movc d0,cacr
397 | rte
398
399 /*
400 * Trap 12 is the entry point for the cachectl "syscall"
401 * cachectl(command, addr, length)
402 * command in d0, addr in a1, length in d1
403 */
404 GLOBAL(trap12)
405 movl d1,sp@- | push length
406 movl a1,sp@- | push addr
407 movl d0,sp@- | push command
408 jbsr _C_LABEL(cachectl) | do it
409 lea sp@(12),sp | pop args
410 jra _ASM_LABEL(rei) | all done
411
412 /*
413 * Trace (single-step) trap. Kernel-mode is special.
414 * User mode traps are simply passed on to trap().
415 */
416 GLOBAL(trace)
417 clrl sp@- | stack adjust count
418 moveml #0xFFFF,sp@-
419 moveq #T_TRACE,d0
| Bit 5 of the saved SR is the supervisor-state bit.
420 btst #5,sp@(FR_HW) | was supervisor mode?
421 jne _ASM_LABEL(kbrkpt) | yes, kernel brkpt
422 jra _ASM_LABEL(fault) | no, user-mode fault
423
424 /*
425 * Trap 15 is used for:
426 * - GDB breakpoints (in user programs)
427 * - KGDB breakpoints (in the kernel)
428 * - trace traps for SUN binaries (not fully supported yet)
429 * User mode traps are simply passed to trap().
430 */
431 GLOBAL(trap15)
432 clrl sp@- | stack adjust count
433 moveml #0xFFFF,sp@-
434 moveq #T_TRAP15,d0
| Bit 5 of the saved SR is the supervisor-state bit.
435 btst #5,sp@(FR_HW) | was supervisor mode?
436 jne _ASM_LABEL(kbrkpt) | yes, kernel brkpt
437 jra _ASM_LABEL(fault) | no, user-mode fault
438
439 ASLOCAL(kbrkpt)
440 | Kernel-mode breakpoint or trace trap. (d0=trap_type)
441 | Save the system sp rather than the user sp.
442 movw #PSL_HIGHIPL,sr | lock out interrupts
443 lea sp@(FR_SIZE),a6 | Save stack pointer
444 movl a6,sp@(FR_SP) | from before trap
445
446 | If we are not on tmpstk switch to it.
447 | (so debugger can change the stack pointer)
448 movl a6,d1
449 cmpl #_ASM_LABEL(tmpstk),d1
450 jls Lbrkpt2 | already on tmpstk
451 | Copy frame to the temporary stack
452 movl sp,a0 | a0=src
453 lea _ASM_LABEL(tmpstk)-96,a1 | a1=dst
454 movl a1,sp | sp=new frame
455 moveq #FR_SIZE,d1
| Copy FR_SIZE bytes of the saved frame, a longword at a time.
456 Lbrkpt1:
457 movl a0@+,a1@+
458 subql #4,d1
459 bgt Lbrkpt1
460
461 Lbrkpt2:
462 | Call the trap handler for the kernel debugger.
463 | Do not call trap() to handle it, so that we can
464 | set breakpoints in trap() if we want. We know
465 | the trap type is either T_TRACE or T_BREAKPOINT.
466 movl d0,sp@- | push trap type
467 jbsr _C_LABEL(trap_kdebug)
468 addql #4,sp | pop args
469
470 | The stack pointer may have been modified, or
471 | data below it modified (by kgdb push call),
472 | so push the hardware frame at the current sp
473 | before restoring registers and returning.
474 movl sp@(FR_SP),a0 | modified sp
475 lea sp@(FR_SIZE),a1 | end of our frame
476 movl a1@-,a0@- | copy 2 longs with
477 movl a1@-,a0@- | ... predecrement
478 movl a0,sp@(FR_SP) | sp = h/w frame
479 moveml sp@+,#0x7FFF | restore all but sp
480 movl sp@,sp | ... and sp
481 rte | all done
482
483 /* Use common m68k sigreturn */
484 #include <m68k/m68k/sigreturn.s>
485
486 /*
487 * Interrupt handlers. Most are auto-vectored,
488 * and hard-wired the same way on all sun3 models.
489 * Format in the stack is:
490 * d0,d1,a0,a1, sr, pc, vo
491 */
492
| Save/restore only the caller-saved registers (d0,d1,a0,a1);
| the C interrupt handlers preserve everything else.
493 #define INTERRUPT_SAVEREG \
494 moveml #0xC0C0,sp@-
495
496 #define INTERRUPT_RESTORE \
497 moveml sp@+,#0x0303
498
499 /*
500 * This is the common auto-vector interrupt handler,
501 * for which the CPU provides the vector=0x18+level.
502 * These are installed in the interrupt vector table.
503 */
504 .align 2
505 GLOBAL(_isr_autovec)
506 INTERRUPT_SAVEREG
507 jbsr _C_LABEL(isr_autovec)
508 INTERRUPT_RESTORE
509 jra _ASM_LABEL(rei)
510
511 /* clock: see clock.c */
512 .align 2
513 GLOBAL(_isr_clock)
514 INTERRUPT_SAVEREG
515 jbsr _C_LABEL(clock_intr)
516 INTERRUPT_RESTORE
517 jra _ASM_LABEL(rei)
518
519 | Handler for all vectored interrupts (i.e. VME interrupts)
520 .align 2
521 GLOBAL(_isr_vectored)
522 INTERRUPT_SAVEREG
523 jbsr _C_LABEL(isr_vectored)
524 INTERRUPT_RESTORE
525 jra _ASM_LABEL(rei)
526
527 #undef INTERRUPT_SAVEREG
528 #undef INTERRUPT_RESTORE
529
530 /* interrupt counters (needed by vmstat) */
| Names and counters are parallel arrays, indexed by interrupt level.
531 GLOBAL(intrnames)
532 .asciz "spur" | 0
533 .asciz "lev1" | 1
534 .asciz "lev2" | 2
535 .asciz "lev3" | 3
536 .asciz "lev4" | 4
537 .asciz "clock" | 5
538 .asciz "lev6" | 6
539 .asciz "nmi" | 7
540 GLOBAL(eintrnames)
541
542 .data
543 .even
544 GLOBAL(intrcnt)
545 .long 0,0,0,0,0,0,0,0,0,0
546 GLOBAL(eintrcnt)
547 .text
548
549 /*
550 * Emulation of VAX REI instruction.
551 *
552 * This code is (mostly) un-altered from the hp300 code,
553 * except that sun machines do not need a simulated SIR
554 * because they have a real software interrupt register.
555 *
556 * This code deals with checking for and servicing ASTs
557 * (profiling, scheduling) and software interrupts (network, softclock).
558 * We check for ASTs first, just like the VAX. To avoid excess overhead
559 * the T_ASTFLT handling code will also check for software interrupts so we
560 * do not have to do it here. After identifying that we need an AST we
561 * drop the IPL to allow device interrupts.
562 *
563 * This code is complicated by the fact that sendsig may have been called
564 * necessitating a stack cleanup.
565 */
566
567 ASGLOBAL(rei)
568 #ifdef DIAGNOSTIC
569 tstl _C_LABEL(panicstr) | have we panicked?
570 jne Ldorte | yes, do not make matters worse
571 #endif
572 tstl _C_LABEL(astpending) | AST pending?
573 jeq Ldorte | no, done
574 Lrei1:
575 btst #5,sp@ | yes, are we returning to user mode?
576 jne Ldorte | no, done
577 movw #PSL_LOWIPL,sr | lower SPL
578 clrl sp@- | stack adjust
579 moveml #0xFFFF,sp@- | save all registers
580 movl usp,a1 | including
581 movl a1,sp@(FR_SP) | the users SP
582 clrl sp@- | VA == none
583 clrl sp@- | code == none
584 movl #T_ASTFLT,sp@- | type == async system trap
585 jbsr _C_LABEL(trap) | go handle it
586 lea sp@(12),sp | pop value args
587 movl sp@(FR_SP),a0 | restore user SP
588 movl a0,usp | from save area
589 movw sp@(FR_ADJ),d0 | need to adjust stack?
590 jne Laststkadj | yes, go to it
591 moveml sp@+,#0x7FFF | no, restore most user regs
592 addql #8,sp | toss SP and stack adjust
593 rte | and do real RTE
594 Laststkadj:
| sendsig left a hole in the stack; slide the 8-byte hardware
| frame down to close it before returning.
595 lea sp@(FR_HW),a1 | pointer to HW frame
596 addql #8,a1 | source pointer
597 movl a1,a0 | source
598 addw d0,a0 | + hole size = dest pointer
599 movl a1@-,a0@- | copy
600 movl a1@-,a0@- | 8 bytes
601 movl a0,sp@(FR_SP) | new SSP
602 moveml sp@+,#0x7FFF | restore user registers
603 movl sp@,sp | and our SP
604 Ldorte:
605 rte | real return
606
607 /*
608 * Initialization is at the beginning of this file, because the
609 * kernel entry point needs to be at zero for compatibility with
610 * the Sun boot loader. This works on Sun machines because the
611 * interrupt vector table for reset is NOT at address zero.
612 * (The MMU has a "boot" bit that forces access to the PROM)
613 */
614
615 /*
616 * Use common m68k sigcode.
617 */
618 #include <m68k/m68k/sigcode.s>
619
620 .text
621
622 /*
623 * Primitives
624 */
625
626 /*
627 * Use common m68k support routines.
628 */
629 #include <m68k/m68k/support.s>
630
| Reschedule-request flag (4 bytes, BSS); cleared in cpu_switch.
631 BSS(want_resched,4)
632
633 /*
634 * Use common m68k process manipulation routines.
635 */
636 #include <m68k/m68k/proc_subr.s>
637
638 | Message for Lbadsw panic
639 Lsw0:
640 .asciz "cpu_switch"
641 .even
642
643 .data
| curpcb points at the pcb of the currently running process;
| nullpcb is a throw-away pcb used while a process exits.
644 GLOBAL(masterpaddr) | XXX compatibility (debuggers)
645 GLOBAL(curpcb)
646 .long 0
647 ASBSS(nullpcb,SIZEOF_PCB)
648 .text
649
650 /*
651 * At exit of a process, do a cpu_switch for the last time.
652 * Switch to a safe stack and PCB, and deallocate the process's resources.
653 * The ipl is high enough to prevent the memory from being reallocated.
654 */
655 ENTRY(switch_exit)
656 movl sp@(4),a0 | struct proc *p
657 | save state into garbage pcb
658 movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
659 lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
660 movl a0,sp@- | pass proc ptr down
661
662 /* Free old process's u-area. */
663 movl #USPACE,sp@- | size of u-area
664 movl a0@(P_ADDR),sp@- | address of process's u-area
665 movl _C_LABEL(kernel_map),sp@- | map it was allocated in
666 #if defined(UVM)
667 jbsr _C_LABEL(uvm_km_free) | deallocate it
668 #else
669 jbsr _C_LABEL(kmem_free) | deallocate it
670 #endif
671 lea sp@(12),sp | pop args
672
| Tail-call into cpu_switch to pick the next process to run.
673 jra _C_LABEL(cpu_switch)
674
675 /*
676 * When no processes are on the runq, cpu_switch() branches to idle
677 * to wait for something to come ready.
678 */
679 .data
680 GLOBAL(Idle_count)
681 .long 0
682 .text
683
684 Lidle:
| Stop with interrupts enabled; any interrupt resumes us and we
| recheck the run queues below.
685 stop #PSL_LOWIPL
686 GLOBAL(_Idle) | See clock.c
687 movw #PSL_HIGHIPL,sr
688 addql #1, _C_LABEL(Idle_count)
689 tstl _C_LABEL(whichqs)
690 jeq Lidle
691 movw #PSL_LOWIPL,sr
692 jra Lsw1
693
694 Lbadsw:
| Fatal inconsistency in the run queues or proc state.
695 movl #Lsw0,sp@-
696 jbsr _C_LABEL(panic)
697 /*NOTREACHED*/
698
699 /*
700 * cpu_switch()
701 * Hacked for sun3
702 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
703 * XXX - Should we use p->p_addr instead of curpcb? -gwr
704 */
705 ENTRY(cpu_switch)
706 movl _C_LABEL(curpcb),a1 | current pcb
707 movw sr,a1@(PCB_PS) | save sr before changing ipl
708 #ifdef notyet
709 movl _C_LABEL(curproc),sp@- | remember last proc running
710 #endif
711 clrl _C_LABEL(curproc)
712
713 Lsw1:
714 /*
715 * Find the highest-priority queue that isn't empty,
716 * then take the first proc from that queue.
717 */
718 clrl d0
719 lea _C_LABEL(whichqs),a0
720 movl a0@,d1
721 Lswchk:
722 btst d0,d1
723 jne Lswfnd
724 addqb #1,d0
725 cmpb #32,d0
726 jne Lswchk
727 jra _C_LABEL(_Idle)
728 Lswfnd:
729 movw #PSL_HIGHIPL,sr | lock out interrupts
730 movl a0@,d1 | and check again...
731 bclr d0,d1
732 jeq Lsw1 | proc moved, rescan
733 movl d1,a0@ | update whichqs
734 moveq #1,d1 | double check for higher priority
735 lsll d0,d1 | process (which may have snuck in
736 subql #1,d1 | while we were finding this one)
737 andl a0@,d1
738 jeq Lswok | no one got in, continue
739 movl a0@,d1
740 bset d0,d1 | otherwise put this one back
741 movl d1,a0@
742 jra Lsw1 | and rescan
743 Lswok:
744 movl d0,d1
745 lslb #3,d1 | convert queue number to index
746 addl #_qs,d1 | locate queue (q)
747 movl d1,a1
748 cmpl a1@(P_FORW),a1 | anyone on queue?
749 jeq Lbadsw | no, panic
| Unlink p from the head of its run queue.
750 movl a1@(P_FORW),a0 | p = q->p_forw
751 movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
752 movl a0@(P_FORW),a1 | q = p->p_forw
753 movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
754 cmpl a0@(P_FORW),d1 | anyone left on queue?
755 jeq Lsw2 | no, skip
756 movl _C_LABEL(whichqs),d1
757 bset d0,d1 | yes, reset bit
758 movl d1,_C_LABEL(whichqs)
759 Lsw2:
760 movl a0,_C_LABEL(curproc)
761 clrl _C_LABEL(want_resched)
762 #ifdef notyet
763 movl sp@+,a1 | XXX - Make this work!
764 cmpl a0,a1 | switching to same proc?
765 jeq Lswdone | yes, skip save and restore
766 #endif
767 /*
768 * Save state of previous process in its pcb.
769 */
770 movl _C_LABEL(curpcb),a1
771 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
772 movl usp,a2 | grab USP (a2 has been saved)
773 movl a2,a1@(PCB_USP) | and save it
774
775 tstl _C_LABEL(fputype) | Do we have an fpu?
776 jeq Lswnofpsave | No? Then don't try save.
777 lea a1@(PCB_FPCTX),a2 | pointer to FP save area
778 fsave a2@ | save FP state
779 tstb a2@ | null state frame?
780 jeq Lswnofpsave | yes, all done
781 fmovem fp0-fp7,a2@(FPF_REGS) | save FP general regs
782 fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control regs
783 Lswnofpsave:
784
785 /*
786 * Now that we have saved all the registers that must be
787 * preserved, we are free to use those registers until
788 * we load the registers for the switched-to process.
789 * In this section, keep: a0=curproc, a1=curpcb
790 */
791
792 #ifdef DIAGNOSTIC
793 tstl a0@(P_WCHAN)
794 jne Lbadsw
795 cmpb #SRUN,a0@(P_STAT)
796 jne Lbadsw
797 #endif
798 clrl a0@(P_BACK) | clear back link
799 movl a0@(P_ADDR),a1 | get p_addr
800 movl a1,_C_LABEL(curpcb)
801
802 /*
803 * Load the new VM context (new MMU root pointer)
804 */
805 movl a0@(P_VMSPACE),a2 | vm = p->p_vmspace
806 #ifdef DIAGNOSTIC
807 tstl a2 | vm == VM_MAP_NULL?
808 jeq Lbadsw | panic
809 #endif
810 #ifdef PMAP_DEBUG
811 /* When debugging just call _pmap_switch(). */
812 movl a2@(VM_PMAP),a2 | pmap = vm->vm_map.pmap
813 pea a2@ | push pmap
814 jbsr _C_LABEL(_pmap_switch) | _pmap_switch(pmap)
815 addql #4,sp
816 movl _C_LABEL(curpcb),a1 | restore p_addr
817 #else
818 /* Otherwise, use this inline version. */
819 lea _C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
820 movl a2@(VM_PMAP),a2 | pmap = vm->vm_map.pmap
821 movl a2@(PM_A_PHYS),d0 | phys = pmap->pm_a_phys
822 cmpl a3@(4),d0 | == kernel_crp.rp_addr ?
823 jeq Lsame_mmuctx | skip loadcrp/flush
824 /* OK, it is a new MMU context. Load it up. */
825 movl d0,a3@(4)
826 movl #CACHE_CLR,d0
827 movc d0,cacr | invalidate cache(s)
828 pflusha | flush entire TLB
829 pmove a3@,crp | load new user root pointer
830 Lsame_mmuctx:
831 #endif
832
833 /*
834 * Reload the registers for the new process.
835 * After this point we can only use d0,d1,a0,a1
836 */
837 moveml a1@(PCB_REGS),#0xFCFC | reload registers
838 movl a1@(PCB_USP),a0
839 movl a0,usp | and USP
840
841 tstl _C_LABEL(fputype) | If we don't have an fpu,
842 jeq Lres_skip | don't try to restore it.
843 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
844 tstb a0@ | null state frame?
845 jeq Lresfprest | yes, easy
846 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
847 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
848 Lresfprest:
849 frestore a0@ | restore state
850 Lres_skip:
851 movw a1@(PCB_PS),d0 | no, restore PS
852 #ifdef DIAGNOSTIC
853 btst #13,d0 | supervisor mode?
854 jeq Lbadsw | no? panic!
855 #endif
856 movw d0,sr | OK, restore PS
857 moveq #1,d0 | return 1 (for alternate returns)
858 rts
859
860 /*
861 * savectx(pcb)
862 * Update pcb, saving current processor state.
| Like the save half of cpu_switch: stores SR, USP, non-scratch
| registers and (if present) FPU state into the given pcb.
863 */
864 ENTRY(savectx)
865 movl sp@(4),a1
866 movw sr,a1@(PCB_PS)
867 movl usp,a0 | grab USP
868 movl a0,a1@(PCB_USP) | and save it
869 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
870
871 tstl _C_LABEL(fputype) | Do we have FPU?
872 jeq Lsavedone | No? Then don't save state.
873 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
874 fsave a0@ | save FP state
875 tstb a0@ | null state frame?
876 jeq Lsavedone | yes, all done
877 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
878 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
879 Lsavedone:
880 moveq #0,d0 | return 0
881 rts
882
883 /* suline() */
884
885 #ifdef DEBUG
886 .data
| Debug knobs: when nonzero, TBIS/TBIAS/TBIAU flush the whole
| TLB instead of selected entries.
887 ASGLOBAL(fulltflush)
888 .long 0
889 ASGLOBAL(fullcflush)
890 .long 0
891 .text
892 #endif
893
894 /*
895 * Invalidate entire TLB.
896 */
897 ENTRY(TBIA)
898 _C_LABEL(_TBIA):
899 pflusha
900 movl #DC_CLEAR,d0
901 movc d0,cacr | invalidate on-chip d-cache
902 rts
903
904 /*
905 * Invalidate any TLB entry for given VA (TB Invalidate Single)
906 */
907 ENTRY(TBIS)
908 #ifdef DEBUG
909 tstl _ASM_LABEL(fulltflush) | being conservative?
910 jne _C_LABEL(_TBIA) | yes, flush entire TLB
911 #endif
912 movl sp@(4),a0
913 pflush #0,#0,a0@ | flush address from both sides
914 movl #DC_CLEAR,d0
915 movc d0,cacr | invalidate on-chip data cache
916 rts
917
918 /*
919 * Invalidate supervisor side of TLB
920 */
921 ENTRY(TBIAS)
922 #ifdef DEBUG
923 tstl _ASM_LABEL(fulltflush) | being conservative?
924 jne _C_LABEL(_TBIA) | yes, flush everything
925 #endif
926 pflush #4,#4 | flush supervisor TLB entries
927 movl #DC_CLEAR,d0
928 movc d0,cacr | invalidate on-chip d-cache
929 rts
930
931 /*
932 * Invalidate user side of TLB
933 */
934 ENTRY(TBIAU)
935 #ifdef DEBUG
936 tstl _ASM_LABEL(fulltflush) | being conservative?
937 jne _C_LABEL(_TBIA) | yes, flush everything
938 #endif
939 pflush #0,#4 | flush user TLB entries
940 movl #DC_CLEAR,d0
941 movc d0,cacr | invalidate on-chip d-cache
942 rts
943
944 /*
945 * Invalidate instruction cache
946 */
947 ENTRY(ICIA)
948 movl #IC_CLEAR,d0
949 movc d0,cacr | invalidate i-cache
950 rts
951
952 /*
953 * Invalidate data cache.
954 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
955 * problems with DC_WA. The only cases we have to worry about are context
956 * switch and TLB changes, both of which are handled "in-line" in resume
957 * and TBI*.
958 */
| DCIA/DCIS are deliberately no-ops here (see note above).
959 ENTRY(DCIA)
960 __DCIA:
961 rts
962
963 ENTRY(DCIS)
964 __DCIS:
965 rts
966
967 /*
968 * Invalidate data cache.
969 */
970 ENTRY(DCIU)
971 movl #DC_CLEAR,d0
972 movc d0,cacr | invalidate on-chip d-cache
973 rts
974
975 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
976
977 ENTRY(PCIA)
978 movl #DC_CLEAR,d0
979 movc d0,cacr | invalidate on-chip d-cache
980 rts
981
| No external cache on this hardware; these are stubs.
982 ENTRY(ecacheon)
983 rts
984
985 ENTRY(ecacheoff)
986 rts
987
988 /*
989 * Get callers current SP value.
990 * Note that simply taking the address of a local variable in a C function
991 * doesn't work because callee saved registers may be outside the stack frame
992 * defined by A6 (e.g. GCC generated code).
993 *
994 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
995 */
996 GLOBAL(getsp)
997 movl sp,d0 | get current SP
998 addql #4,d0 | compensate for return address
999 rts
1000
| Accessors for the function-code and vector-base control registers.
1001 ENTRY(getsfc)
1002 movc sfc,d0
1003 rts
1004
1005 ENTRY(getdfc)
1006 movc dfc,d0
1007 rts
1008
1009 ENTRY(getvbr)
1010 movc vbr, d0
1011 rts
1012
1013 ENTRY(setvbr)
1014 movl sp@(4), d0
1015 movc d0, vbr
1016 rts
1017
1018 /*
1019 * Load a new CPU Root Pointer (CRP) into the MMU.
1020 * void loadcrp(struct mmu_rootptr *);
| Caches and the TLB are invalidated first, since entries from
| the old context would be stale under the new root pointer.
1021 */
1022 ENTRY(loadcrp)
1023 movl sp@(4),a0 | arg1: &CRP
1024 movl #CACHE_CLR,d0
1025 movc d0,cacr | invalidate cache(s)
1026 pflusha | flush entire TLB
1027 pmove a0@,crp | load new user root pointer
1028 rts
1029
1030 /*
1031 * Get the physical address of the PTE for a given VA.
| Uses the 68030 ptestr table-search instruction; FC=5 is
| supervisor data space, #7 searches all table levels.
1032 */
1033 ENTRY(ptest_addr)
1034 movl sp@(4),a0 | VA
1035 ptestr #5,a0@,#7,a1 | a1 = addr of PTE
1036 movl a1,d0
1037 rts
1038
1039 /*
1040 * Set processor priority level calls. Most are implemented with
1041 * inline asm expansions. However, we need one instantiation here
1042 * in case some non-optimized code makes external references.
1043 * Most places will use the inlined functions param.h supplies.
1044 */
1045
1046 ENTRY(_getsr)
1047 clrl d0
1048 movw sr,d0
1049 rts
1050
| _spl(new): unconditionally set SR, return the old value.
1051 ENTRY(_spl)
1052 clrl d0
1053 movw sr,d0
1054 movl sp@(4),d1
1055 movw d1,sr
1056 rts
1057
| _splraise(new): set SR only if it raises the IPL; return old SR.
1058 ENTRY(_splraise)
1059 clrl d0
1060 movw sr,d0
1061 movl d0,d1
1062 andl #PSL_HIGHIPL,d1 | old &= PSL_HIGHIPL
1063 cmpl sp@(4),d1 | (old - new)
1064 bge Lsplr
1065 movl sp@(4),d1
1066 movw d1,sr
1067 Lsplr:
1068 rts
1069
1070 /*
1071 * Save and restore 68881 state.
| A null (idle) state frame means no FP registers need saving.
1072 */
1073 ENTRY(m68881_save)
1074 movl sp@(4),a0 | save area pointer
1075 fsave a0@ | save state
1076 tstb a0@ | null state frame?
1077 jeq Lm68881sdone | yes, all done
1078 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1079 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1080 Lm68881sdone:
1081 rts
1082
1083 ENTRY(m68881_restore)
1084 movl sp@(4),a0 | save area pointer
1085 tstb a0@ | null state frame?
1086 jeq Lm68881rdone | yes, easy
1087 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1088 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1089 Lm68881rdone:
1090 frestore a0@ | restore state
1091 rts
1092
1093 /*
1094 * _delay(unsigned N)
1095 * Delay for at least (N/256) microseconds.
1096 * This routine depends on the variable: delay_divisor
1097 * which should be set based on the CPU clock rate.
1098 * XXX: Currently this is set based on the CPU model,
1099 * XXX: but this should be determined at run time...
1100 */
1101 GLOBAL(_delay)
1102 | d0 = arg = (usecs << 8)
1103 movl sp@(4),d0
1104 | d1 = delay_divisor;
1105 movl _C_LABEL(delay_divisor),d1
| Busy-wait: subtract the divisor until the count goes non-positive.
1106 L_delay:
1107 subl d1,d0
1108 jgt L_delay
1109 rts
1110
1111
1112 | Define some addresses, mostly so DDB can print useful info.
1113 | Not using _C_LABEL() here because these symbols are never
1114 | referenced by any C code, and if the leading underscore
1115 | ever goes away, these lines turn into syntax errors...
1116 .set _KERNBASE,KERNBASE
1117 .set _MONSTART,SUN3X_MONSTART
1118 .set _PROM_BASE,SUN3X_PROM_BASE
1119 .set _MONEND,SUN3X_MONEND
1120
1121 |The end!
1122