locore.s revision 1.17 1 /* $NetBSD: locore.s,v 1.17 1997/04/25 01:50:55 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 */
43
44 #include "assym.h"
45 #include <machine/asm.h>
46 #include <machine/trap.h>
47
48 | Remember this is a fun project!
49
50 .data
| Save area for the PROM monitor's CPU Root Pointer (CRP); also
| borrowed briefly by the startup code below for the tt0 setup.
51 .globl _mon_crp
52 _mon_crp:
53 .long 0,0
54
55 | This is for kvm_mkdb, and should be the address of the beginning
56 | of the kernel text segment (not necessarily the same as kernbase).
57 .text
58 .globl _kernel_text
59 _kernel_text:
60
61 | This is the entry point, as well as the end of the temporary stack
62 | used during process switch (one 8K page ending at start)
63 .globl tmpstk
64 tmpstk:
65 .globl start
66 start:
67 | The first step, after disabling interrupts, is to map enough of the kernel
68 | into high virtual address space so that we can use position dependent code.
69 | This is a tricky task on the sun3x because the MMU is already enabled and
70 | the ROM monitor provides no indication of where the root MMU table is mapped.
71 | Therefore we must use one of the 68030's 'transparent translation' registers
72 | to define a range in the address space where the MMU translation is
73 | turned off. Once this is complete we can modify the MMU table directly
74 | without the need for it to be mapped into virtual memory.
75 | All code must be position independent until otherwise noted, as the
76 | boot loader has loaded us into low memory but all the symbols in this
77 | code have been linked high.
78 movw #PSL_HIGHIPL, sr | no interrupts
79 movl #KERNBASE, a5 | for vtop conversion
80 lea _mon_crp, a0 | where to store the CRP
81 subl a5, a0 | convert linked (high) VA to loaded (low) address
82 | Note: borrowing mon_crp for tt0 setup...
83 movl #0x3F8107, a0@ | map the low 1GB v=p with the
84 .long 0xf0100800 | transparent translation reg0
85 | [ pmove a0@, tt0 ]
86 | In order to map the kernel into high memory we will copy the root table
87 | entry which maps the 16 megabytes of memory starting at 0x0 into the
88 | entry which maps the 16 megabytes starting at KERNBASE.
89 pmove crp, a0@ | Get monitor CPU root pointer
90 movl a0@(4), a1 | 2nd word is PA of level A table
91
92 movl a1, a0 | compute the descriptor address
93 addl #0x3e0, a1 | for VA starting at KERNBASE
94 movl a0@, a1@ | copy descriptor type
95 movl a0@(4), a1@(4) | copy physical address
96
97 | Kernel is now double mapped at zero and KERNBASE.
98 | Force a long jump to the relocated code (high VA).
99 movl #IC_CLEAR, d0 | Flush the I-cache
100 movc d0, cacr | (before executing at the high alias)
101 jmp L_high_code:l | long jump
102
103 L_high_code:
104 | We are now running in the correctly relocated kernel, so
105 | we are no longer restricted to position-independent code.
106 | It is handy to leave transparent translation enabled
107 | for the low 1GB while __bootstrap() is doing its thing.
108
109 | Do bootstrap stuff needed before main() gets called.
110 | Our boot loader leaves a copy of the kernel's exec header
111 | just before the start of the kernel text segment, so the
112 | kernel can sanity-check the DDB symbols at [end...esym].
113 | Pass the struct exec at tmpstk-32 to __bootstrap().
114 | Also, make sure the initial frame pointer is zero so that
115 | the backtrace algorithm used by KGDB terminates nicely.
116 lea tmpstk-32, sp
117 movl #0,a6 | null frame pointer terminates backtraces
118 jsr __bootstrap | See _startup.c
119
120 | Now turn off the transparent translation of the low 1GB.
121 | (this also flushes the ATC)
122 clrl sp@- | zero tt0 value on the stack
123 .long 0xf0170800 | pmove sp@,tt0
124 addql #4,sp
125
126 | Now that __bootstrap() is done using the PROM functions,
127 | we can safely set the sfc/dfc to something != FC_CONTROL
128 moveq #FC_USERD, d0 | make movs access "user data"
129 movc d0, sfc | space for copyin/copyout
130 movc d0, dfc
131
132 | Setup process zero user/kernel stacks.
133 movl _proc0paddr,a1 | get proc0 pcb addr
134 lea a1@(USPACE-4),sp | set SSP to last word
135 movl #USRSTACK-4,a2
136 movl a2,usp | init user SP
137
138 | Note curpcb was already set in __bootstrap().
139 | Will do fpu initialization during autoconfig (see fpu.c)
140 | The interrupt vector table and stack are now ready.
141 | Interrupts will be enabled later, AFTER autoconfiguration
142 | is finished, to avoid spurious interrupts.
143
144 /*
145 * Final preparation for calling main.
146 *
147 * Create a fake exception frame that returns to user mode,
148 * and save its address in p->p_md.md_regs for cpu_fork().
149 * The new frames for process 1 and 2 will be adjusted by
150 * cpu_set_kpc() to arrange for a call to a kernel function
151 * before the new process does its rte out to user mode.
152 */
153 clrw sp@- | tf_format,tf_vector
154 clrl sp@- | tf_pc (filled in later)
155 movw #PSL_USER,sp@- | tf_sr for user mode
156 clrl sp@- | tf_stackadj
157 lea sp@(-64),sp | tf_regs[16]
158 movl sp,a1 | a1=trapframe
159 lea _proc0,a0 | proc0.p_md.md_regs =
160 movl a1,a0@(P_MDREGS) | trapframe
161 movl a2,a1@(FR_SP) | a2 == usp (from above)
162 pea a1@ | push &trapframe
163 jbsr _main | main(&trapframe)
164 addql #4,sp | help DDB backtrace
165 trap #15 | should not get here
166
167 | This is used by cpu_fork() to return to user mode.
168 | It is called with SP pointing to a struct trapframe.
169 .globl _proc_do_uret
170 _proc_do_uret:
171 movl sp@(FR_SP),a0 | grab and load
172 movl a0,usp | user SP
173 moveml sp@+,#0x7FFF | load most registers (all but SSP)
174 addql #8,sp | pop SSP and stack adjust count
175 rte | return to user mode
176
177 /*
178 * proc_trampoline:
179 * This is used by cpu_set_kpc() to "push" a function call onto the
180 * kernel stack of some process, very much like a signal delivery.
181 * When we get here, the stack has:
182 *
183 * SP+8: switchframe from before cpu_set_kpc
184 * SP+4: void *proc;
185 * SP: u_long func;
186 *
187 * On entry, the switchframe pushed by cpu_set_kpc has already been
188 * popped off the stack, so all this needs to do is pop the function
189 * pointer into a register, call it, then pop the arg, and finally
190 * return using the switchframe that remains on the stack.
191 */
192 .globl _proc_trampoline
193 _proc_trampoline:
194 movl sp@+,a0 | function pointer
195 jbsr a0@ | (*func)(procp)
196 addql #4,sp | toss the arg
197 rts | as cpu_switch would do
198
199 | That is all the assembly startup code we need on the sun3x!
200 | The rest of this is like the hp300/locore.s where possible.
201
202 /*
203 * Trap/interrupt vector routines
204 */
205 #include <m68k/m68k/trap_subr.s>
206
| Bus error / address error handlers. Decode the 68030 hardware bus
| error frame (SSW, fault address, frame format), then hand off to
| the common faultstkadj code with a trap type, SSW, and fault VA.
207 .globl _buserr, _addrerr, _illinst, _zerodiv, _chkinst
208 .globl _trapvinst, _privinst, _trace, _badtrap, _fmterr
209 .globl _trap0, _trap1, _trap2, _trap12, _trap15
210 .globl _coperr, _fpfline, _fpunsupp
211
212 .globl _trap, _nofault, _longjmp
213 _buserr:
214 tstl _nofault | device probe?
215 jeq _addrerr | no, handle as usual
216 movl _nofault,sp@- | yes,
217 jbsr _longjmp | longjmp(nofault)
218 _addrerr:
219 clrl sp@- | stack adjust count
220 moveml #0xFFFF,sp@- | save user registers
221 movl usp,a0 | save the user SP
222 movl a0,sp@(FR_SP) | in the savearea
223 lea sp@(FR_HW),a1 | grab base of HW berr frame
224 moveq #0,d0 | clear d0; SSW goes in low word
225 movw a1@(10),d0 | grab SSW for fault processing
226 btst #12,d0 | RB set?
227 jeq LbeX0 | no, test RC
228 bset #14,d0 | yes, must set FB
229 movw d0,a1@(10) | for hardware too
230 LbeX0:
231 btst #13,d0 | RC set?
232 jeq LbeX1 | no, skip
233 bset #15,d0 | yes, must set FC
234 movw d0,a1@(10) | for hardware too
235 LbeX1:
236 btst #8,d0 | data fault?
237 jeq Lbe0 | no, check for hard cases
238 movl a1@(16),d1 | fault address is as given in frame
239 jra Lbe10 | that's it
240 Lbe0:
241 btst #4,a1@(6) | long (type B) stack frame?
242 jne Lbe4 | yes, go handle
243 movl a1@(2),d1 | no, can use save PC
244 btst #14,d0 | FB set?
245 jeq Lbe3 | no, try FC
246 addql #4,d1 | yes, adjust address
247 jra Lbe10 | done
248 Lbe3:
249 btst #15,d0 | FC set?
250 jeq Lbe10 | no, done
251 addql #2,d1 | yes, adjust address
252 jra Lbe10 | done
253 Lbe4:
254 movl a1@(36),d1 | long format, use stage B address
255 btst #15,d0 | FC set?
256 jeq Lbe10 | no, all done
257 subql #2,d1 | yes, adjust address
258 Lbe10:
259 movl d1,sp@- | push fault VA
260 movl d0,sp@- | and padded SSW
261 movw a1@(6),d0 | get frame format/vector offset
262 andw #0x0FFF,d0 | clear out frame format
263 cmpw #12,d0 | address error vector?
264 jeq Lisaerr | yes, go to it
265
266 /* MMU-specific code to determine reason for bus error. */
267 movl d1,a0 | fault address
268 movl sp@,d0 | function code from ssw
269 btst #8,d0 | data fault?
270 jne Lbe10a
271 movql #1,d0 | user program access FC
272 | (we don't separate data/program)
273 btst #5,a1@ | supervisor mode?
274 jeq Lbe10a | if no, done
275 movql #5,d0 | else supervisor program access
276 Lbe10a:
277 ptestr d0,a0@,#7 | do a table search
278 pmove psr,sp@ | save result
279 movb sp@,d1 | first byte of saved MMU PSR
280 btst #2,d1 | invalid? (incl. limit viol and berr)
281 jeq Lmightnotbemerr | no -> wp check
282 btst #7,d1 | is it MMU table berr?
283 jeq Lismerr | no, must be fast
284 jra Lisberr1 | real bus err needs not be fast
285 Lmightnotbemerr:
286 btst #3,d1 | write protect bit set?
287 jeq Lisberr1 | no, must be bus error
288 movl sp@,d0 | ssw into low word of d0
289 andw #0xc0,d0 | write protect is set on page:
290 cmpw #0x40,d0 | was it read cycle?
291 jeq Lisberr1 | yes, was not WPE, must be bus err
292 /* End of MMU-specific bus error code. */
293
294 Lismerr:
295 movl #T_MMUFLT,sp@- | show that we are an MMU fault
296 jra _ASM_LABEL(faultstkadj) | and deal with it
297 Lisaerr:
298 movl #T_ADDRERR,sp@- | mark address error
299 jra _ASM_LABEL(faultstkadj) | and deal with it
300 Lisberr1:
301 clrw sp@ | re-clear pad word
302 Lisberr:
303 movl #T_BUSERR,sp@- | mark bus error
304 jra _ASM_LABEL(faultstkadj) | and deal with it
305
306 /*
307 * FP exceptions.
308 * Unimplemented FP instruction (F-line) and unsupported data type
309 * traps; both are punted to trap() as FP-emulation faults.
310 */
309 _fpfline:
310 clrl sp@- | stack adjust count
311 moveml #0xFFFF,sp@- | save registers
312 moveq #T_FPEMULI,d0 | denote as FP emulation trap
313 jra fault | do it
314
315 _fpunsupp:
316 clrl sp@- | stack adjust count
317 moveml #0xFFFF,sp@- | save registers
318 moveq #T_FPEMULD,d0 | denote as FP emulation trap
319 jra fault | do it
320
321 /*
322 * Handles all other FP coprocessor exceptions.
323 * Note that since some FP exceptions generate mid-instruction frames
324 * and may cause signal delivery, we need to test for stack adjustment
325 * after the trap call.
326 */
327 .globl _fpfault
328 _fpfault:
329 clrl sp@- | stack adjust count
330 moveml #0xFFFF,sp@- | save user registers
331 movl usp,a0 | and save
332 movl a0,sp@(FR_SP) | the user stack pointer
333 clrl sp@- | no VA arg
334 movl _curpcb,a0 | current pcb
335 lea a0@(PCB_FPCTX),a0 | address of FP savearea
336 fsave a0@ | save state
337 tstb a0@ | null state frame?
338 jeq Lfptnull | yes, safe
339 clrw d0 | no, need to tweak BIU
340 movb a0@(1),d0 | get frame size
341 bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
342 Lfptnull:
343 fmovem fpsr,sp@- | push fpsr as code argument
344 frestore a0@ | restore state
345 movl #T_FPERR,sp@- | push type arg
346 jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
347
348 /*
349 * Other exceptions only cause four and six word stack frames and require
350 * no post-trap stack adjustment. Unexpected vectors land here.
351 */
352
353 .globl _straytrap
354 _badtrap:
355 clrl sp@- | stack adjust count
356 moveml #0xFFFF,sp@- | save std frame regs
357 jbsr _straytrap | report
358 moveml sp@+,#0xFFFF | restore regs
359 addql #4, sp | stack adjust count
360 jra rei | all done
361
362 /*
363 * Trap 0 is for system calls
364 */
365 .globl _syscall
366 _trap0:
367 clrl sp@- | stack adjust count
368 moveml #0xFFFF,sp@- | save user registers
369 movl usp,a0 | save the user SP
370 movl a0,sp@(FR_SP) | in the savearea
371 movl d0,sp@- | push syscall number
372 jbsr _syscall | handle it
373 addql #4,sp | pop syscall arg
374 movl sp@(FR_SP),a0 | grab and restore
375 movl a0,usp | user SP
376 moveml sp@+,#0x7FFF | restore most registers
377 addql #8,sp | pop SP and stack adjust
378 jra rei | all done
379
380 /*
381 * Trap 1 action depends on the emulation type:
382 * NetBSD: sigreturn "syscall"
383 * HPUX: user breakpoint
384 */
385 _trap1:
386 #if 0 /* COMPAT_HPUX */
387 /* If process is HPUX, this is a user breakpoint. */
388 jne _trap15 | HPUX user breakpoint
389 #endif
390 jra sigreturn | NetBSD
391
392 /*
393 * Trap 2 action depends on the emulation type:
394 * NetBSD: user breakpoint -- See XXX below...
395 * SunOS: cache flush
396 * HPUX: sigreturn
397 */
398 _trap2:
399 #if 0 /* COMPAT_HPUX */
400 /* If process is HPUX, this is a sigreturn call */
401 jne sigreturn
402 #endif
403 jra _trap15 | NetBSD user breakpoint
404 | XXX - Make NetBSD use trap 15 for breakpoints?
405 | XXX - That way, we can allow this cache flush...
406 | XXX SunOS trap #2 (and NetBSD?)
407 | Flush on-chip cache (leave it enabled)
408 | movl #CACHE_CLR,d0
409 | movc d0,cacr
410 | rte
411
412 /*
413 * Trap 12 is the entry point for the cachectl "syscall"
414 * cachectl(command, addr, length)
415 * command in d0, addr in a1, length in d1
416 */
417 .globl _cachectl
418 _trap12:
419 movl d1,sp@- | push length
420 movl a1,sp@- | push addr
421 movl d0,sp@- | push command
422 jbsr _cachectl | do it
423 lea sp@(12),sp | pop args
424 jra rei | all done
425
426 /*
427 * Trace (single-step) trap. Kernel-mode is special.
428 * User mode traps are simply passed on to trap().
429 */
430 _trace:
431 clrl sp@- | stack adjust count
432 moveml #0xFFFF,sp@- | save all registers
433 moveq #T_TRACE,d0 | trap type for kbrkpt/fault
434 btst #5,sp@(FR_HW) | was supervisor mode?
435 jne kbrkpt | yes, kernel brkpt
436 jra fault | no, user-mode fault
437
438 /*
439 * Trap 15 is used for:
440 * - GDB breakpoints (in user programs)
441 * - KGDB breakpoints (in the kernel)
442 * - trace traps for SUN binaries (not fully supported yet)
443 * User mode traps are simply passed to trap().
444 */
445 _trap15:
446 clrl sp@- | stack adjust count
447 moveml #0xFFFF,sp@- | save all registers
448 moveq #T_TRAP15,d0 | trap type for kbrkpt/fault
449 btst #5,sp@(FR_HW) | was supervisor mode?
450 jne kbrkpt | yes, kernel brkpt
451 jra fault | no, user-mode fault
452
453 kbrkpt:
454 | Kernel-mode breakpoint or trace trap. (d0=trap_type)
455 | Save the system sp rather than the user sp.
456 movw #PSL_HIGHIPL,sr | lock out interrupts
457 lea sp@(FR_SIZE),a6 | Save stack pointer
458 movl a6,sp@(FR_SP) | from before trap
459
460 | If we are not on tmpstk switch to it.
461 | (so debugger can change the stack pointer)
462 movl a6,d1
463 cmpl #tmpstk,d1
464 jls Lbrkpt2 | already on tmpstk
465 | Copy frame to the temporary stack
466 movl sp,a0 | a0=src
467 lea tmpstk-96,a1 | a1=dst
468 movl a1,sp | sp=new frame
469 moveq #FR_SIZE,d1 | d1 = bytes left to copy
470 Lbrkpt1:
471 movl a0@+,a1@+
472 subql #4,d1
473 bgt Lbrkpt1
474
475 Lbrkpt2:
476 | Call the trap handler for the kernel debugger.
477 | Do not call trap() to handle it, so that we can
478 | set breakpoints in trap() if we want. We know
479 | the trap type is either T_TRACE or T_BREAKPOINT.
480 movl d0,sp@- | push trap type
481 jbsr _trap_kdebug
482 addql #4,sp | pop args
483
484 | The stack pointer may have been modified, or
485 | data below it modified (by kgdb push call),
486 | so push the hardware frame at the current sp
487 | before restoring registers and returning.
488 movl sp@(FR_SP),a0 | modified sp
489 lea sp@(FR_SIZE),a1 | end of our frame
490 movl a1@-,a0@- | copy 2 longs with
491 movl a1@-,a0@- | ... predecrement
492 movl a0,sp@(FR_SP) | sp = h/w frame
493 moveml sp@+,#0x7FFF | restore all but sp
494 movl sp@,sp | ... and sp
495 rte | all done
496
497 /* Use common m68k sigreturn */
498 #include <m68k/m68k/sigreturn.s>
499
500 /*
501 * Interrupt handlers. Most are auto-vectored,
502 * and hard-wired the same way on all sun3 models.
503 * Format in the stack is:
504 * d0,d1,a0,a1, sr, pc, vo
505 */
506
/* Save/restore the caller-saved registers d0,d1,a0,a1 around an ISR. */
507 #define INTERRUPT_SAVEREG \
508 moveml #0xC0C0,sp@-
509
510 #define INTERRUPT_RESTORE \
511 moveml sp@+,#0x0303
512
513 /*
514 * This is the common auto-vector interrupt handler,
515 * for which the CPU provides the vector=0x18+level.
516 * These are installed in the interrupt vector table.
517 */
518 .align 2
519 .globl __isr_autovec, _isr_autovec
520 __isr_autovec:
521 INTERRUPT_SAVEREG
522 jbsr _isr_autovec
523 INTERRUPT_RESTORE
524 jra rei
525
526 /* clock: see clock.c */
527 .align 2
528 .globl __isr_clock, _clock_intr
529 __isr_clock:
530 INTERRUPT_SAVEREG
531 jbsr _clock_intr
532 INTERRUPT_RESTORE
533 jra rei
534
535 | Handler for all vectored interrupts (i.e. VME interrupts)
536 .align 2
537 .globl __isr_vectored, _isr_vectored
538 __isr_vectored:
539 INTERRUPT_SAVEREG
540 jbsr _isr_vectored
541 INTERRUPT_RESTORE
542 jra rei
543
544 #undef INTERRUPT_SAVEREG
545 #undef INTERRUPT_RESTORE
546
547 /* interrupt counters (needed by vmstat) */
548 .globl _intrcnt,_eintrcnt,_intrnames,_eintrnames
549 _intrnames:
550 .asciz "spur" | 0
551 .asciz "lev1" | 1
552 .asciz "lev2" | 2
553 .asciz "lev3" | 3
554 .asciz "lev4" | 4
555 .asciz "clock" | 5
556 .asciz "lev6" | 6
557 .asciz "nmi" | 7
558 _eintrnames:
559
560 .data
561 .even
562 _intrcnt:
563 .long 0,0,0,0,0,0,0,0,0,0
564 _eintrcnt:
565 .text
566
567 /*
568 * Emulation of VAX REI instruction.
569 *
570 * This code is (mostly) un-altered from the hp300 code,
571 * except that sun machines do not need a simulated SIR
572 * because they have a real software interrupt register.
573 *
574 * This code deals with checking for and servicing ASTs
575 * (profiling, scheduling) and software interrupts (network, softclock).
576 * We check for ASTs first, just like the VAX. To avoid excess overhead
577 * the T_ASTFLT handling code will also check for software interrupts so we
578 * do not have to do it here. After identifying that we need an AST we
579 * drop the IPL to allow device interrupts.
580 *
581 * This code is complicated by the fact that sendsig may have been called
582 * necessitating a stack cleanup.
583 */
584
585 .globl _astpending
586 .globl rei
587 rei:
588 #ifdef DIAGNOSTIC
589 tstl _panicstr | have we panicked?
590 jne Ldorte | yes, do not make matters worse
591 #endif
592 tstl _astpending | AST pending?
593 jeq Ldorte | no, done
594 Lrei1:
595 btst #5,sp@ | yes, are we returning to user mode?
596 jne Ldorte | no, done
597 movw #PSL_LOWIPL,sr | lower SPL
598 clrl sp@- | stack adjust
599 moveml #0xFFFF,sp@- | save all registers
600 movl usp,a1 | including
601 movl a1,sp@(FR_SP) | the users SP
602 clrl sp@- | VA == none
603 clrl sp@- | code == none
604 movl #T_ASTFLT,sp@- | type == async system trap
605 jbsr _trap | go handle it
606 lea sp@(12),sp | pop value args
607 movl sp@(FR_SP),a0 | restore user SP
608 movl a0,usp | from save area
609 movw sp@(FR_ADJ),d0 | need to adjust stack?
610 jne Laststkadj | yes, go to it
611 moveml sp@+,#0x7FFF | no, restore most user regs
612 addql #8,sp | toss SP and stack adjust
613 rte | and do real RTE
614 Laststkadj:
615 lea sp@(FR_HW),a1 | pointer to HW frame
616 addql #8,a1 | source pointer
617 movl a1,a0 | source
618 addw d0,a0 | + hole size = dest pointer
619 movl a1@-,a0@- | copy
620 movl a1@-,a0@- | 8 bytes
621 movl a0,sp@(FR_SP) | new SSP
622 moveml sp@+,#0x7FFF | restore user registers
623 movl sp@,sp | and our SP
624 Ldorte:
625 rte | real return
626
627 /*
628 * Initialization is at the beginning of this file, because the
629 * kernel entry point needs to be at zero for compatibility with
630 * the Sun boot loader. This works on Sun machines because the
631 * interrupt vector table for reset is NOT at address zero.
632 * (The MMU has a "boot" bit that forces access to the PROM)
633 */
634
635 /*
636 * Use common m68k sigcode.
637 */
638 #include <m68k/m68k/sigcode.s>
639
640 .text
641
642 /*
643 * Primitives
644 */
645
646 /*
647 * Use common m68k support routines.
648 */
649 #include <m68k/m68k/support.s>
650
651 .globl _whichqs,_qs,_cnt,_panic
652 .globl _curproc
653 .comm _want_resched,4
654
655 /*
656 * Use common m68k process manipulation routines.
657 */
658 #include <m68k/m68k/proc_subr.s>
659
660 | Message for Lbadsw panic
661 Lsw0:
662 .asciz "cpu_switch"
663 .even
664
665 .globl _curpcb
666 .globl _masterpaddr | XXX compatibility (debuggers)
667 .data
668 _masterpaddr: | XXX compatibility (debuggers)
669 _curpcb:
670 .long 0
671 .comm nullpcb,SIZEOF_PCB | empty pcb used by switch_exit
672 .text
673
674 /*
675 * At exit of a process, do a cpu_switch for the last time.
676 * Switch to a safe stack and PCB, and deallocate the process's resources.
677 * The ipl is high enough to prevent the memory from being reallocated.
678 */
679 ENTRY(switch_exit)
680 movl sp@(4),a0 | struct proc *p
681 movl #nullpcb,_curpcb | save state into garbage pcb
682 lea tmpstk,sp | goto a tmp stack
683 movl a0,sp@- | pass proc ptr down
684
685 /* Free old process's u-area. */
686 movl #USPACE,sp@- | size of u-area
687 movl a0@(P_ADDR),sp@- | address of process's u-area
688 movl _kernel_map,sp@- | map it was allocated in
689 jbsr _kmem_free | deallocate it
690 lea sp@(12),sp | pop args
691
692 jra _cpu_switch | find a new process to run
693
694 /*
695 * When no processes are on the runq, cpu_switch() branches to idle
696 * to wait for something to come ready.
697 */
698 .data
699 .globl _Idle_count
700 _Idle_count:
701 .long 0
702 .text
703
704 .globl Idle
705 Lidle:
706 stop #PSL_LOWIPL | wait for an interrupt
707 Idle:
708 movw #PSL_HIGHIPL,sr | block interrupts while checking
709 addql #1, _Idle_count
710 tstl _whichqs | anything on a run queue?
711 jeq Lidle | no, stop until interrupted
712 movw #PSL_LOWIPL,sr
713 jra Lsw1 | yes, go pick a process
714
715 Lbadsw:
716 movl #Lsw0,sp@- | panic("cpu_switch")
717 jbsr _panic
718 /*NOTREACHED*/
719
720 /*
721 * cpu_switch()
722 * Hacked for sun3
723 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
724 * XXX - Should we use p->p_addr instead of curpcb? -gwr
725 */
726 ENTRY(cpu_switch)
727 movl _curpcb,a1 | current pcb
728 movw sr,a1@(PCB_PS) | save sr before changing ipl
729 #ifdef notyet
730 movl _curproc,sp@- | remember last proc running
731 #endif
732 clrl _curproc
733
734 Lsw1:
735 /*
736 * Find the highest-priority queue that isn't empty,
737 * then take the first proc from that queue.
738 */
739 clrl d0
740 lea _whichqs,a0
741 movl a0@,d1
742 Lswchk:
743 btst d0,d1
744 jne Lswfnd
745 addqb #1,d0
746 cmpb #32,d0
747 jne Lswchk
748 jra Idle
749 Lswfnd:
750 movw #PSL_HIGHIPL,sr | lock out interrupts
751 movl a0@,d1 | and check again...
752 bclr d0,d1
753 jeq Lsw1 | proc moved, rescan
754 movl d1,a0@ | update whichqs
755 moveq #1,d1 | double check for higher priority
756 lsll d0,d1 | process (which may have snuck in
757 subql #1,d1 | while we were finding this one)
758 andl a0@,d1
759 jeq Lswok | no one got in, continue
760 movl a0@,d1
761 bset d0,d1 | otherwise put this one back
762 movl d1,a0@
763 jra Lsw1 | and rescan
764 Lswok:
765 movl d0,d1
766 lslb #3,d1 | convert queue number to index
767 addl #_qs,d1 | locate queue (q)
768 movl d1,a1
769 cmpl a1@(P_FORW),a1 | anyone on queue?
770 jeq Lbadsw | no, panic
771 movl a1@(P_FORW),a0 | p = q->p_forw
772 movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
773 movl a0@(P_FORW),a1 | q = p->p_forw
774 movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
775 cmpl a0@(P_FORW),d1 | anyone left on queue?
776 jeq Lsw2 | no, skip
777 movl _whichqs,d1
778 bset d0,d1 | yes, reset bit
779 movl d1,_whichqs
780 Lsw2:
781 movl a0,_curproc
782 clrl _want_resched
783 #ifdef notyet
784 movl sp@+,a1 | XXX - Make this work!
785 cmpl a0,a1 | switching to same proc?
786 jeq Lswdone | yes, skip save and restore
787 #endif
788 /*
789 * Save state of previous process in its pcb.
790 */
791 movl _curpcb,a1
792 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
793 movl usp,a2 | grab USP (a2 has been saved)
794 movl a2,a1@(PCB_USP) | and save it
795
796 tstl _fputype | Do we have an fpu?
797 jeq Lswnofpsave | No? Then don't try save.
798 lea a1@(PCB_FPCTX),a2 | pointer to FP save area
799 fsave a2@ | save FP state
800 tstb a2@ | null state frame?
801 jeq Lswnofpsave | yes, all done
802 fmovem fp0-fp7,a2@(FPF_REGS) | save FP general regs
803 fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control regs
804 Lswnofpsave:
805
806 /*
807 * Now that we have saved all the registers that must be
808 * preserved, we are free to use those registers until
809 * we load the registers for the switched-to process.
810 * In this section, keep: a0=curproc, a1=curpcb
811 */
812
813 #ifdef DIAGNOSTIC
814 tstl a0@(P_WCHAN)
815 jne Lbadsw
816 cmpb #SRUN,a0@(P_STAT)
817 jne Lbadsw
818 #endif
819 clrl a0@(P_BACK) | clear back link
820 movl a0@(P_ADDR),a1 | get p_addr
821 movl a1,_curpcb
822
823 /*
824 * Load the new VM context (new MMU root pointer)
825 */
826 movl a0@(P_VMSPACE),a2 | vm = p->p_vmspace
827 #ifdef DIAGNOSTIC
828 tstl a2 | map == VM_MAP_NULL?
829 jeq Lbadsw | panic
830 #endif
831 #ifdef PMAP_DEBUG
832 /*
833 * Just call pmap_activate() for now. Later on,
834 * use the in-line version below (for speed).
835 */
836 lea a2@(VM_PMAP),a2 | pmap = &vmspace.vm_pmap
837 pea a2@ | push pmap
838 jbsr _pmap_activate | pmap_activate(pmap)
839 addql #4,sp
840 movl _curpcb,a1 | restore p_addr
841 #else
842 /* XXX - Later, use this inline version. */
843 /* Just load the new CPU Root Pointer (MMU) */
844 lea _kernel_crp, a3 | our CPU Root Ptr. (CRP)
845 lea a2@(VM_PMAP),a2 | pmap = &vmspace.vm_pmap
846 movl a2@(PM_A_PHYS),d0 | phys = pmap->pm_a_phys
847 cmpl a3@(4),d0 | == kernel_crp.rp_addr ?
848 jeq Lsame_mmuctx | skip loadcrp/flush
849 /* OK, it is a new MMU context. Load it up. */
850 movl d0,a3@(4)
851 movl #CACHE_CLR,d0
852 movc d0,cacr | invalidate cache(s)
853 pflusha | flush entire TLB
854 pmove a3@,crp | load new user root pointer
855 Lsame_mmuctx:
856 #endif
857
858 /*
859 * Reload the registers for the new process.
860 * After this point we can only use d0,d1,a0,a1
861 */
862 moveml a1@(PCB_REGS),#0xFCFC | reload registers
863 movl a1@(PCB_USP),a0
864 movl a0,usp | and USP
865
866 tstl _fputype | If we don't have an fpu,
867 jeq Lres_skip | don't try to restore it.
868 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
869 tstb a0@ | null state frame?
870 jeq Lresfprest | yes, easy
871 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
872 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
873 Lresfprest:
874 frestore a0@ | restore state
875 Lres_skip:
876 movw a1@(PCB_PS),d0 | no, restore PS
877 #ifdef DIAGNOSTIC
878 btst #13,d0 | supervisor mode?
879 jeq Lbadsw | no? panic!
880 #endif
881 movw d0,sr | OK, restore PS
882 moveq #1,d0 | return 1 (for alternate returns)
883 rts
884
885 /*
886 * savectx(pcb)
887 * Update pcb, saving current processor state.
888 */
889 ENTRY(savectx)
890 movl sp@(4),a1 | a1 = pcb argument
891 movw sr,a1@(PCB_PS)
892 movl usp,a0 | grab USP
893 movl a0,a1@(PCB_USP) | and save it
894 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
895
896 tstl _fputype | Do we have FPU?
897 jeq Lsavedone | No? Then don't save state.
898 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
899 fsave a0@ | save FP state
900 tstb a0@ | null state frame?
901 jeq Lsavedone | yes, all done
902 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
903 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
904 Lsavedone:
905 moveq #0,d0 | return 0
906 rts
907
908 /* suline() `040 only */
909
910 #ifdef DEBUG
| Debug knobs: when nonzero, the TBI* routines flush everything.
911 .data
912 .globl fulltflush, fullcflush
913 fulltflush:
914 .long 0
915 fullcflush:
916 .long 0
917 .text
918 #endif
919
920 /*
921 * Invalidate entire TLB.
922 */
923 ENTRY(TBIA)
924 __TBIA:
925 pflusha
926 movl #DC_CLEAR,d0
927 movc d0,cacr | invalidate on-chip d-cache
928 rts
929
930 /*
931 * Invalidate any TLB entry for given VA (TB Invalidate Single)
932 */
933 ENTRY(TBIS)
934 #ifdef DEBUG
935 tstl fulltflush | being conservative?
936 jne __TBIA | yes, flush entire TLB
937 #endif
938 movl sp@(4),a0 | a0 = VA argument
939 pflush #0,#0,a0@ | flush address from both sides
940 movl #DC_CLEAR,d0
941 movc d0,cacr | invalidate on-chip data cache
942 rts
943
944 /*
945 * Invalidate supervisor side of TLB
946 */
947 ENTRY(TBIAS)
948 #ifdef DEBUG
949 tstl fulltflush | being conservative?
950 jne __TBIA | yes, flush everything
951 #endif
952 pflush #4,#4 | flush supervisor TLB entries
953 movl #DC_CLEAR,d0
954 movc d0,cacr | invalidate on-chip d-cache
955 rts
956
957 /*
958 * Invalidate user side of TLB
959 */
960 ENTRY(TBIAU)
961 #ifdef DEBUG
962 tstl fulltflush | being conservative?
963 jne __TBIA | yes, flush everything
964 #endif
965 pflush #0,#4 | flush user TLB entries
966 movl #DC_CLEAR,d0
967 movc d0,cacr | invalidate on-chip d-cache
968 rts
969
970 /*
971 * Invalidate instruction cache
972 */
973 ENTRY(ICIA)
974 movl #IC_CLEAR,d0
975 movc d0,cacr | invalidate i-cache
976 rts
977
978 /*
979 * Invalidate data cache.
980 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
981 * problems with DC_WA. The only cases we have to worry about are context
982 * switch and TLB changes, both of which are handled "in-line" in resume
983 * and TBI*.
984 */
985 ENTRY(DCIA)
986 __DCIA:
987 rts | no-op (see NOTE above)
988
989 ENTRY(DCIS)
990 __DCIS:
991 rts | no-op (see NOTE above)
992
993 /*
994 * Invalidate data cache.
995 */
996 ENTRY(DCIU)
997 movl #DC_CLEAR,d0
998 movc d0,cacr | invalidate on-chip d-cache
999 rts
1000
1001 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
1002
1003 ENTRY(PCIA)
1004 movl #DC_CLEAR,d0
1005 movc d0,cacr | invalidate on-chip d-cache
1006 rts
1007
| No external cache on this hardware; these are no-ops.
1008 ENTRY(ecacheon)
1009 rts
1010
1011 ENTRY(ecacheoff)
1012 rts
1013
1014 /*
1015 * Get callers current SP value.
1016 * Note that simply taking the address of a local variable in a C function
1017 * doesn't work because callee saved registers may be outside the stack frame
1018 * defined by A6 (e.g. GCC generated code).
1019 *
1020 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
1021 */
1022 .globl _getsp
1023 _getsp:
1024 movl sp,d0 | get current SP
1025 addql #4,d0 | compensate for return address
1026 rts
1027
| Accessors for the 68k control registers sfc/dfc/vbr.
1028 ENTRY(getsfc)
1029 movc sfc,d0 | return source function code
1030 rts
1031
1032 ENTRY(getdfc)
1033 movc dfc,d0 | return destination function code
1034 rts
1035
1036 ENTRY(getvbr)
1037 movc vbr, d0 | return vector base register
1038 rts
1039
1040 ENTRY(setvbr)
1041 movl sp@(4), d0 | arg1: new vector base
1042 movc d0, vbr
1043 rts
1044
1045 /*
1046 * Load a new CPU Root Pointer (CRP) into the MMU.
1047 * void loadcrp(struct mmu_rootptr *);
1048 */
1049 ENTRY(loadcrp)
1050 movl sp@(4),a0 | arg1: &CRP
1051 movl #CACHE_CLR,d0
1052 movc d0,cacr | invalidate cache(s)
1053 pflusha | flush entire TLB
1054 pmove a0@,crp | load new user root pointer
1055 rts
1056
1057 /*
1058 * Get the physical address of the PTE for a given VA.
1059 */
1060 ENTRY(ptest_addr)
1061 movl sp@(4),a0 | VA
1062 ptestr #5,a0@,#7,a1 | a1 = addr of PTE
1063 movl a1,d0 | return PTE address
1064 rts
1065
1066 /*
1067 * Set processor priority level calls. Most are implemented with
1068 * inline asm expansions. However, we need one instantiation here
1069 * in case some non-optimized code makes external references.
1070 * Most places will use the inlined function param.h supplies.
1071 */
1072
1073 ENTRY(_spl)
1074 movl sp@(4),d1 | arg1: new PSL value
1075 clrl d0
1076 movw sr,d0 | return old SR
1077 movw d1,sr | set new SR
1078 rts
1079
1080 ENTRY(getsr)
1081 moveq #0, d0
1082 movw sr, d0 | return current SR
1083 rts
1084
1085 /*
1086 * Save and restore 68881 state.
1087 */
1088 ENTRY(m68881_save)
1089 movl sp@(4),a0 | save area pointer
1090 fsave a0@ | save state
1091 tstb a0@ | null state frame?
1092 jeq Lm68881sdone | yes, all done
1093 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1094 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1095 Lm68881sdone:
1096 rts
1097
1098 ENTRY(m68881_restore)
1099 movl sp@(4),a0 | save area pointer
1100 tstb a0@ | null state frame?
1101 jeq Lm68881rdone | yes, easy
1102 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1103 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1104 Lm68881rdone:
1105 frestore a0@ | restore state
1106 rts
1107
1108 /*
1109 * _delay(unsigned N)
1110 * Delay for at least (N/256) microseconds.
1111 * This routine depends on the variable: delay_divisor
1112 * which should be set based on the CPU clock rate.
1113 * XXX: Currently this is set in sun3_startup.c based on the
1114 * XXX: CPU model but this should be determined at run time...
1115 */
1116 .globl __delay
1117 __delay:
1118 | d0 = arg = (usecs << 8)
1119 movl sp@(4),d0
1120 | d1 = delay_divisor;
1121 movl _delay_divisor,d1
1122 L_delay:
1123 subl d1,d0 | spin until the count runs out
1124 jgt L_delay
1125 rts
1126
1127
1128 | Define some addresses, mostly so DDB can print useful info.
1129 .globl _kernbase
1130 .set _kernbase,KERNBASE
1131 .globl _dvma_base
1132 .set _dvma_base,DVMA_SPACE_START
1133 .globl _prom_start
1134 .set _prom_start,MONSTART
1135 .globl _prom_base
1136 .set _prom_base,PROM_BASE
1137
1138 |The end!
1139