locore.s revision 1.43 1 1.43 tsutsui /* $NetBSD: locore.s,v 1.43 2000/11/03 05:28:28 tsutsui Exp $ */
2 1.1 gwr
3 1.1 gwr /*
4 1.1 gwr * Copyright (c) 1988 University of Utah.
5 1.1 gwr * Copyright (c) 1980, 1990, 1993
6 1.1 gwr * The Regents of the University of California. All rights reserved.
7 1.1 gwr *
8 1.1 gwr * This code is derived from software contributed to Berkeley by
9 1.1 gwr * the Systems Programming Group of the University of Utah Computer
10 1.1 gwr * Science Department.
11 1.1 gwr *
12 1.1 gwr * Redistribution and use in source and binary forms, with or without
13 1.1 gwr * modification, are permitted provided that the following conditions
14 1.1 gwr * are met:
15 1.1 gwr * 1. Redistributions of source code must retain the above copyright
16 1.1 gwr * notice, this list of conditions and the following disclaimer.
17 1.1 gwr * 2. Redistributions in binary form must reproduce the above copyright
18 1.1 gwr * notice, this list of conditions and the following disclaimer in the
19 1.1 gwr * documentation and/or other materials provided with the distribution.
20 1.1 gwr * 3. All advertising materials mentioning features or use of this software
21 1.1 gwr * must display the following acknowledgement:
22 1.1 gwr * This product includes software developed by the University of
23 1.1 gwr * California, Berkeley and its contributors.
24 1.1 gwr * 4. Neither the name of the University nor the names of its contributors
25 1.1 gwr * may be used to endorse or promote products derived from this software
26 1.1 gwr * without specific prior written permission.
27 1.1 gwr *
28 1.1 gwr * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 1.1 gwr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 1.1 gwr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 1.1 gwr * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 1.1 gwr * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 1.1 gwr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 1.1 gwr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 1.1 gwr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 1.1 gwr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 1.1 gwr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 1.1 gwr * SUCH DAMAGE.
39 1.1 gwr *
40 1.1 gwr * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 1.1 gwr * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 1.1 gwr */
43 1.1 gwr
44 1.29 thorpej #include "opt_compat_netbsd.h"
45 1.34 kleink #include "opt_compat_svr4.h"
46 1.35 christos #include "opt_compat_sunos.h"
47 1.42 thorpej #include "opt_lockdebug.h"
48 1.27 gwr
49 1.1 gwr #include "assym.h"
50 1.17 thorpej #include <machine/asm.h>
51 1.1 gwr #include <machine/trap.h>
52 1.1 gwr
53 1.1 gwr | Remember this is a fun project!
54 1.1 gwr
55 1.1 gwr .data
| mon_crp: two-longword save area for the PROM monitor's CPU Root
| Pointer (CRP).  The startup code below also borrows it briefly as
| scratch space when building the tt0 register image.
56 1.19 jeremy GLOBAL(mon_crp)
57 1.1 gwr .long 0,0
58 1.1 gwr
59 1.1 gwr | This is for kvm_mkdb, and should be the address of the beginning
60 1.1 gwr | of the kernel text segment (not necessarily the same as kernbase).
61 1.1 gwr .text
62 1.19 jeremy GLOBAL(kernel_text)
63 1.1 gwr
64 1.1 gwr | This is the entry point, as well as the end of the temporary stack
65 1.1 gwr | used during process switch (one 8K page ending at start)
| Note: tmpstk and start are the same address; the temporary stack
| grows downward from here into the page below the entry point.
66 1.19 jeremy ASGLOBAL(tmpstk)
67 1.20 gwr ASGLOBAL(start)
68 1.19 jeremy
69 1.1 gwr | The first step, after disabling interrupts, is to map enough of the kernel
70 1.1 gwr | into high virtual address space so that we can use position dependent code.
71 1.1 gwr | This is a tricky task on the sun3x because the MMU is already enabled and
72 1.1 gwr | the ROM monitor provides no indication of where the root MMU table is mapped.
73 1.1 gwr | Therefore we must use one of the 68030's 'transparent translation' registers
74 1.1 gwr | to define a range in the address space where the MMU translation is
75 1.1 gwr | turned off. Once this is complete we can modify the MMU table directly
76 1.1 gwr | without the need for it to be mapped into virtual memory.
77 1.1 gwr | All code must be position independent until otherwise noted, as the
78 1.1 gwr | boot loader has loaded us into low memory but all the symbols in this
79 1.1 gwr | code have been linked high.
| NOTE(review): until the "jmp L_high_code:l" below, we are executing at
| the low load address, so symbol references are converted to physical
| addresses by subtracting KERNBASE (kept in a5).
80 1.1 gwr movw #PSL_HIGHIPL, sr | no interrupts
81 1.1 gwr movl #KERNBASE, a5 | for vtop conversion
82 1.19 jeremy lea _C_LABEL(mon_crp), a0 | where to store the CRP
83 1.1 gwr subl a5, a0
84 1.1 gwr | Note: borrowing mon_crp for tt0 setup...
85 1.1 gwr movl #0x3F8107, a0@ | map the low 1GB v=p with the
| The .long below is a hand-assembled instruction; its mnemonic form is
| shown in brackets on the following line.
86 1.14 jeremy .long 0xf0100800 | transparent translation reg0
87 1.14 jeremy | [ pmove a0@, tt0 ]
88 1.1 gwr | In order to map the kernel into high memory we will copy the root table
89 1.1 gwr | entry which maps the 16 megabytes of memory starting at 0x0 into the
90 1.1 gwr | entry which maps the 16 megabytes starting at KERNBASE.
91 1.1 gwr pmove crp, a0@ | Get monitor CPU root pointer
92 1.1 gwr movl a0@(4), a1 | 2nd word is PA of level A table
93 1.1 gwr
| a0 = level-A descriptor for VA 0; a1 = descriptor 0x3e0 bytes into the
| table, i.e. the one covering KERNBASE.  Copy the 8-byte descriptor.
94 1.1 gwr movl a1, a0 | compute the descriptor address
95 1.1 gwr addl #0x3e0, a1 | for VA starting at KERNBASE
96 1.1 gwr movl a0@, a1@ | copy descriptor type
97 1.1 gwr movl a0@(4), a1@(4) | copy physical address
98 1.1 gwr
99 1.1 gwr | Kernel is now double mapped at zero and KERNBASE.
100 1.1 gwr | Force a long jump to the relocated code (high VA).
101 1.1 gwr movl #IC_CLEAR, d0 | Flush the I-cache
102 1.1 gwr movc d0, cacr
103 1.1 gwr jmp L_high_code:l | long jump
104 1.1 gwr
105 1.1 gwr L_high_code:
106 1.1 gwr | We are now running in the correctly relocated kernel, so
107 1.1 gwr | we are no longer restricted to position-independent code.
108 1.1 gwr | It is handy to leave transparent translation enabled while
109 1.20 gwr | for the low 1GB while _bootstrap() is doing its thing.
110 1.1 gwr
111 1.1 gwr | Do bootstrap stuff needed before main() gets called.
112 1.1 gwr | Our boot loader leaves a copy of the kernel's exec header
113 1.1 gwr | just before the start of the kernel text segment, so the
114 1.1 gwr | kernel can sanity-check the DDB symbols at [end...esym].
115 1.20 gwr | Pass the struct exec at tmpstk-32 to _bootstrap().
116 1.7 gwr | Also, make sure the initial frame pointer is zero so that
117 1.7 gwr | the backtrace algorithm used by KGDB terminates nicely.
118 1.19 jeremy lea _ASM_LABEL(tmpstk)-32, sp
119 1.6 gwr movl #0,a6
120 1.26 gwr jsr _C_LABEL(_bootstrap) | See locore2.c
121 1.1 gwr
122 1.1 gwr | Now turn off the transparent translation of the low 1GB.
123 1.1 gwr | (this also flushes the ATC)
124 1.1 gwr clrl sp@-
125 1.14 jeremy .long 0xf0170800 | pmove sp@,tt0
126 1.1 gwr addql #4,sp
127 1.1 gwr
128 1.20 gwr | Now that _bootstrap() is done using the PROM functions,
129 1.1 gwr | we can safely set the sfc/dfc to something != FC_CONTROL
130 1.1 gwr moveq #FC_USERD, d0 | make movs access "user data"
131 1.1 gwr movc d0, sfc | space for copyin/copyout
132 1.1 gwr movc d0, dfc
133 1.1 gwr
134 1.1 gwr | Setup process zero user/kernel stacks.
135 1.19 jeremy movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
136 1.1 gwr lea a1@(USPACE-4),sp | set SSP to last word
137 1.1 gwr movl #USRSTACK-4,a2
138 1.1 gwr movl a2,usp | init user SP
139 1.1 gwr
140 1.20 gwr | Note curpcb was already set in _bootstrap().
141 1.1 gwr | Will do fpu initialization during autoconfig (see fpu.c)
142 1.1 gwr | The interrupt vector table and stack are now ready.
143 1.1 gwr | Interrupts will be enabled later, AFTER autoconfiguration
144 1.1 gwr | is finished, to avoid spurrious interrupts.
145 1.1 gwr
146 1.1 gwr /*
147 1.1 gwr  * Final preparation for calling main.
148 1.1 gwr  *
149 1.1 gwr  * Create a fake exception frame that returns to user mode,
150 1.1 gwr  * and save its address in p->p_md.md_regs for cpu_fork().
151 1.1 gwr  * The new frames for process 1 and 2 will be adjusted by
152 1.1 gwr  * cpu_set_kpc() to arrange for a call to a kernel function
153 1.1 gwr  * before the new process does its rte out to user mode.
154 1.1 gwr  */
| Build the frame top-down: format/vector, PC, SR, stackadj, then the
| 16 saved registers (64 bytes), so sp ends up at the trapframe base.
155 1.6 gwr clrw sp@- | tf_format,tf_vector
156 1.6 gwr clrl sp@- | tf_pc (filled in later)
157 1.6 gwr movw #PSL_USER,sp@- | tf_sr for user mode
158 1.6 gwr clrl sp@- | tf_stackadj
159 1.6 gwr lea sp@(-64),sp | tf_regs[16]
160 1.6 gwr movl sp,a1 | a1=trapframe
161 1.19 jeremy lea _C_LABEL(proc0),a0 | proc0.p_md.md_regs =
162 1.6 gwr movl a1,a0@(P_MDREGS) | trapframe
163 1.6 gwr movl a2,a1@(FR_SP) | a2 == usp (from above)
164 1.7 gwr pea a1@ | push &trapframe
165 1.19 jeremy jbsr _C_LABEL(main) | main(&trapframe)
166 1.7 gwr addql #4,sp | help DDB backtrace
167 1.1 gwr trap #15 | should not get here
168 1.1 gwr
169 1.1 gwr | This is used by cpu_fork() to return to user mode.
170 1.1 gwr | It is called with SP pointing to a struct trapframe.
171 1.19 jeremy GLOBAL(proc_do_uret)
172 1.1 gwr movl sp@(FR_SP),a0 | grab and load
173 1.1 gwr movl a0,usp | user SP
| mask 0x7FFF = d0-d7/a0-a6, i.e. every register except a7 (SSP)
174 1.1 gwr moveml sp@+,#0x7FFF | load most registers (all but SSP)
175 1.1 gwr addql #8,sp | pop SSP and stack adjust count
176 1.1 gwr rte
177 1.1 gwr
178 1.1 gwr /*
179 1.1 gwr  * proc_trampoline:
180 1.1 gwr  * This is used by cpu_set_kpc() to "push" a function call onto the
181 1.1 gwr  * kernel stack of some process, very much like a signal delivery.
182 1.1 gwr  * When we get here, the stack has:
183 1.1 gwr  *
184 1.1 gwr  * SP+8: switchframe from before cpu_set_kpc
185 1.31 thorpej  * SP+4: void *arg;
186 1.1 gwr  * SP: u_long func;
187 1.1 gwr  *
188 1.1 gwr  * On entry, the switchframe pushed by cpu_set_kpc has already been
189 1.1 gwr  * popped off the stack, so all this needs to do is pop the function
190 1.1 gwr  * pointer into a register, call it, then pop the arg, and finally
191 1.1 gwr  * return using the switchframe that remains on the stack.
192 1.1 gwr  */
193 1.19 jeremy GLOBAL(proc_trampoline)
194 1.1 gwr movl sp@+,a0 | function pointer
| arg is still at sp@ here, so it serves as the argument to (*func)()
195 1.31 thorpej jbsr a0@ | (*func)(arg)
196 1.1 gwr addql #4,sp | toss the arg
197 1.1 gwr rts | as cpu_switch would do
198 1.1 gwr
199 1.1 gwr | That is all the assembly startup code we need on the sun3x!
200 1.1 gwr | The rest of this is like the hp300/locore.s where possible.
201 1.1 gwr
202 1.1 gwr /*
203 1.1 gwr * Trap/interrupt vector routines
204 1.1 gwr */
205 1.17 thorpej #include <m68k/m68k/trap_subr.s>
206 1.1 gwr
| Bus-error entry.  If a device probe is in progress (nofault != 0) we
| longjmp back to the prober; otherwise fall through into addrerr, which
| builds a full trap frame and decodes the 68030 bus-error stack frame
| and special status word (SSW) to classify the fault.
207 1.19 jeremy GLOBAL(buserr)
208 1.19 jeremy tstl _C_LABEL(nofault) | device probe?
209 1.19 jeremy jeq _C_LABEL(addrerr) | no, handle as usual
210 1.19 jeremy movl _C_LABEL(nofault),sp@- | yes,
211 1.19 jeremy jbsr _C_LABEL(longjmp) | longjmp(nofault)
212 1.19 jeremy GLOBAL(addrerr)
213 1.1 gwr clrl sp@- | stack adjust count
214 1.1 gwr moveml #0xFFFF,sp@- | save user registers
215 1.1 gwr movl usp,a0 | save the user SP
216 1.1 gwr movl a0,sp@(FR_SP) | in the savearea
217 1.1 gwr lea sp@(FR_HW),a1 | grab base of HW berr frame
218 1.1 gwr moveq #0,d0
219 1.1 gwr movw a1@(10),d0 | grab SSW for fault processing
| If a read was retried (RB/RC bits), mark the corresponding fault
| bits (FB/FC) so the hardware re-runs the access on rte.
220 1.1 gwr btst #12,d0 | RB set?
221 1.1 gwr jeq LbeX0 | no, test RC
222 1.1 gwr bset #14,d0 | yes, must set FB
223 1.1 gwr movw d0,a1@(10) | for hardware too
224 1.1 gwr LbeX0:
225 1.1 gwr btst #13,d0 | RC set?
226 1.1 gwr jeq LbeX1 | no, skip
227 1.1 gwr bset #15,d0 | yes, must set FC
228 1.1 gwr movw d0,a1@(10) | for hardware too
229 1.1 gwr LbeX1:
| Determine the fault address: data faults report it directly in the
| frame; instruction faults derive it from the saved PC, adjusted by
| which fault bit (FB/FC) is pending.
230 1.1 gwr btst #8,d0 | data fault?
231 1.1 gwr jeq Lbe0 | no, check for hard cases
232 1.1 gwr movl a1@(16),d1 | fault address is as given in frame
233 1.1 gwr jra Lbe10 | thats it
234 1.1 gwr Lbe0:
235 1.1 gwr btst #4,a1@(6) | long (type B) stack frame?
236 1.1 gwr jne Lbe4 | yes, go handle
237 1.1 gwr movl a1@(2),d1 | no, can use save PC
238 1.1 gwr btst #14,d0 | FB set?
239 1.1 gwr jeq Lbe3 | no, try FC
240 1.1 gwr addql #4,d1 | yes, adjust address
241 1.1 gwr jra Lbe10 | done
242 1.1 gwr Lbe3:
243 1.1 gwr btst #15,d0 | FC set?
244 1.1 gwr jeq Lbe10 | no, done
245 1.1 gwr addql #2,d1 | yes, adjust address
246 1.1 gwr jra Lbe10 | done
247 1.1 gwr Lbe4:
248 1.1 gwr movl a1@(36),d1 | long format, use stage B address
249 1.1 gwr btst #15,d0 | FC set?
250 1.1 gwr jeq Lbe10 | no, all done
251 1.1 gwr subql #2,d1 | yes, adjust address
252 1.1 gwr Lbe10:
253 1.1 gwr movl d1,sp@- | push fault VA
254 1.1 gwr movl d0,sp@- | and padded SSW
255 1.1 gwr movw a1@(6),d0 | get frame format/vector offset
256 1.1 gwr andw #0x0FFF,d0 | clear out frame format
257 1.1 gwr cmpw #12,d0 | address error vector?
258 1.1 gwr jeq Lisaerr | yes, go to it
259 1.1 gwr
260 1.1 gwr /* MMU-specific code to determine reason for bus error. */
| Re-walk the translation tables with ptestr to distinguish an MMU
| fault (invalid/write-protected page) from a true hardware bus error.
261 1.1 gwr movl d1,a0 | fault address
262 1.1 gwr movl sp@,d0 | function code from ssw
263 1.1 gwr btst #8,d0 | data fault?
264 1.1 gwr jne Lbe10a
265 1.1 gwr movql #1,d0 | user program access FC
266 1.1 gwr | (we dont separate data/program)
267 1.1 gwr btst #5,a1@ | supervisor mode?
268 1.1 gwr jeq Lbe10a | if no, done
269 1.1 gwr movql #5,d0 | else supervisor program access
270 1.1 gwr Lbe10a:
271 1.1 gwr ptestr d0,a0@,#7 | do a table search
272 1.1 gwr pmove psr,sp@ | save result
273 1.1 gwr movb sp@,d1
274 1.1 gwr btst #2,d1 | invalid? (incl. limit viol and berr)
275 1.1 gwr jeq Lmightnotbemerr | no -> wp check
276 1.1 gwr btst #7,d1 | is it MMU table berr?
277 1.1 gwr jeq Lismerr | no, must be fast
278 1.1 gwr jra Lisberr1 | real bus err needs not be fast
279 1.1 gwr Lmightnotbemerr:
280 1.1 gwr btst #3,d1 | write protect bit set?
281 1.1 gwr jeq Lisberr1 | no, must be bus error
282 1.1 gwr movl sp@,d0 | ssw into low word of d0
283 1.1 gwr andw #0xc0,d0 | write protect is set on page:
284 1.1 gwr cmpw #0x40,d0 | was it read cycle?
285 1.1 gwr jeq Lisberr1 | yes, was not WPE, must be bus err
286 1.1 gwr /* End of MMU-specific bus error code. */
287 1.1 gwr
288 1.1 gwr Lismerr:
289 1.1 gwr movl #T_MMUFLT,sp@- | show that we are an MMU fault
290 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
291 1.1 gwr Lisaerr:
292 1.1 gwr movl #T_ADDRERR,sp@- | mark address error
293 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
294 1.1 gwr Lisberr1:
295 1.1 gwr clrw sp@ | re-clear pad word
296 1.1 gwr Lisberr:
297 1.1 gwr movl #T_BUSERR,sp@- | mark bus error
298 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
299 1.1 gwr
300 1.1 gwr /*
301 1.1 gwr  * FP exceptions.
302 1.1 gwr  */
| Unimplemented FP instruction: hand off to the FP emulator via trap().
303 1.19 jeremy GLOBAL(fpfline)
304 1.1 gwr clrl sp@- | stack adjust count
305 1.1 gwr moveml #0xFFFF,sp@- | save registers
306 1.1 gwr moveq #T_FPEMULI,d0 | denote as FP emulation trap
307 1.19 jeremy jra _ASM_LABEL(fault) | do it
308 1.1 gwr
| Unimplemented FP data type: same path, different trap code.
309 1.19 jeremy GLOBAL(fpunsupp)
310 1.1 gwr clrl sp@- | stack adjust count
311 1.1 gwr moveml #0xFFFF,sp@- | save registers
312 1.1 gwr moveq #T_FPEMULD,d0 | denote as FP emulation trap
313 1.19 jeremy jra _ASM_LABEL(fault) | do it
314 1.1 gwr
315 1.1 gwr /*
316 1.1 gwr  * Handles all other FP coprocessor exceptions.
317 1.1 gwr  * Note that since some FP exceptions generate mid-instruction frames
318 1.1 gwr  * and may cause signal delivery, we need to test for stack adjustment
319 1.1 gwr  * after the trap call.
320 1.1 gwr  */
321 1.19 jeremy GLOBAL(fpfault)
322 1.1 gwr clrl sp@- | stack adjust count
323 1.1 gwr moveml #0xFFFF,sp@- | save user registers
324 1.1 gwr movl usp,a0 | and save
325 1.1 gwr movl a0,sp@(FR_SP) | the user stack pointer
326 1.1 gwr clrl sp@- | no VA arg
327 1.19 jeremy movl _C_LABEL(curpcb),a0 | current pcb
328 1.1 gwr lea a0@(PCB_FPCTX),a0 | address of FP savearea
329 1.1 gwr fsave a0@ | save state
| A zero first byte means a null (idle) state frame; otherwise the
| frame's size byte locates the BIU flags word at its end, where we
| set exc_pend so the exception re-arms when the state is restored.
330 1.1 gwr tstb a0@ | null state frame?
331 1.1 gwr jeq Lfptnull | yes, safe
332 1.1 gwr clrw d0 | no, need to tweak BIU
333 1.1 gwr movb a0@(1),d0 | get frame size
334 1.1 gwr bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
335 1.1 gwr Lfptnull:
336 1.1 gwr fmovem fpsr,sp@- | push fpsr as code argument
337 1.1 gwr frestore a0@ | restore state
338 1.1 gwr movl #T_FPERR,sp@- | push type arg
339 1.17 thorpej jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
340 1.1 gwr
341 1.1 gwr /*
342 1.1 gwr  * Other exceptions only cause four and six word stack frame and require
343 1.1 gwr  * no post-trap stack adjustment.
344 1.1 gwr  */
345 1.19 jeremy GLOBAL(badtrap)
346 1.1 gwr clrl sp@- | stack adjust count
347 1.1 gwr moveml #0xFFFF,sp@- | save std frame regs
348 1.19 jeremy jbsr _C_LABEL(straytrap) | report
349 1.1 gwr moveml sp@+,#0xFFFF | restore regs
350 1.1 gwr addql #4, sp | stack adjust count
351 1.19 jeremy jra _ASM_LABEL(rei) | all done
352 1.1 gwr
353 1.1 gwr /*
354 1.1 gwr  * Trap 0 is for system calls
355 1.1 gwr  */
| Syscall number arrives in d0; syscall() gets it as its argument and
| the full register save area is available at FR_* offsets.
356 1.19 jeremy GLOBAL(trap0)
357 1.1 gwr clrl sp@- | stack adjust count
358 1.1 gwr moveml #0xFFFF,sp@- | save user registers
359 1.1 gwr movl usp,a0 | save the user SP
360 1.1 gwr movl a0,sp@(FR_SP) | in the savearea
361 1.1 gwr movl d0,sp@- | push syscall number
362 1.19 jeremy jbsr _C_LABEL(syscall) | handle it
363 1.1 gwr addql #4,sp | pop syscall arg
364 1.1 gwr movl sp@(FR_SP),a0 | grab and restore
365 1.1 gwr movl a0,usp | user SP
366 1.1 gwr moveml sp@+,#0x7FFF | restore most registers
367 1.1 gwr addql #8,sp | pop SP and stack adjust
368 1.19 jeremy jra _ASM_LABEL(rei) | all done
369 1.11 gwr
370 1.11 gwr /*
371 1.11 gwr  * Trap 12 is the entry point for the cachectl "syscall"
372 1.11 gwr  * cachectl(command, addr, length)
373 1.11 gwr  * command in d0, addr in a1, length in d1
374 1.11 gwr  */
| Calls cachectl1(command, addr, length, curproc); no frame is built
| since this returns straight through rei.
375 1.19 jeremy GLOBAL(trap12)
376 1.32 is movl _C_LABEL(curproc),sp@- | push curproc pointer
377 1.11 gwr movl d1,sp@- | push length
378 1.11 gwr movl a1,sp@- | push addr
379 1.11 gwr movl d0,sp@- | push command
380 1.32 is jbsr _C_LABEL(cachectl1) | do it
381 1.32 is lea sp@(16),sp | pop args
382 1.19 jeremy jra _ASM_LABEL(rei) | all done
383 1.1 gwr
384 1.1 gwr /*
385 1.1 gwr  * Trace (single-step) trap. Kernel-mode is special.
386 1.1 gwr  * User mode traps are simply passed on to trap().
387 1.1 gwr  */
388 1.19 jeremy GLOBAL(trace)
389 1.1 gwr clrl sp@- | stack adjust count
390 1.1 gwr moveml #0xFFFF,sp@-
391 1.1 gwr moveq #T_TRACE,d0
392 1.37 itohy
393 1.37 itohy | Check PSW and see what happen.
394 1.37 itohy | T=0 S=0 (should not happen)
395 1.37 itohy | T=1 S=0 trace trap from user mode
396 1.37 itohy | T=0 S=1 trace trap on a trap instruction
397 1.37 itohy | T=1 S=1 trace trap from system mode (kernel breakpoint)
398 1.37 itohy
| Invert the saved PSW so "both T and S set" becomes "both bits clear",
| letting one andw/jeq detect the kernel-breakpoint case.
399 1.37 itohy movw sp@(FR_HW),d1 | get PSW
400 1.37 itohy notw d1 | XXX no support for T0 on 680[234]0
401 1.37 itohy andw #PSL_TS,d1 | from system mode (T=1, S=1)?
402 1.37 itohy jeq _ASM_LABEL(kbrkpt) | yes, kernel brkpt
403 1.19 jeremy jra _ASM_LABEL(fault) | no, user-mode fault
404 1.1 gwr
405 1.1 gwr /*
406 1.1 gwr  * Trap 15 is used for:
407 1.1 gwr  * - GDB breakpoints (in user programs)
408 1.1 gwr  * - KGDB breakpoints (in the kernel)
409 1.1 gwr  * - trace traps for SUN binaries (not fully supported yet)
410 1.11 gwr  * User mode traps are simply passed to trap().
411 1.1 gwr  */
412 1.19 jeremy GLOBAL(trap15)
413 1.1 gwr clrl sp@- | stack adjust count
414 1.1 gwr moveml #0xFFFF,sp@-
415 1.1 gwr moveq #T_TRAP15,d0
| Bit 5 of the saved SR is the S (supervisor) bit: set means the trap
| came from kernel mode and goes to the kernel-breakpoint handler.
416 1.11 gwr btst #5,sp@(FR_HW) | was supervisor mode?
417 1.19 jeremy jne _ASM_LABEL(kbrkpt) | yes, kernel brkpt
418 1.19 jeremy jra _ASM_LABEL(fault) | no, user-mode fault
419 1.1 gwr
420 1.19 jeremy ASLOCAL(kbrkpt)
421 1.11 gwr | Kernel-mode breakpoint or trace trap. (d0=trap_type)
422 1.1 gwr | Save the system sp rather than the user sp.
423 1.1 gwr movw #PSL_HIGHIPL,sr | lock out interrupts
424 1.1 gwr lea sp@(FR_SIZE),a6 | Save stack pointer
425 1.1 gwr movl a6,sp@(FR_SP) | from before trap
426 1.1 gwr
427 1.1 gwr | If we are not on tmpstk switch to it.
428 1.1 gwr | (so debugger can change the stack pointer)
429 1.1 gwr movl a6,d1
430 1.19 jeremy cmpl #_ASM_LABEL(tmpstk),d1
431 1.1 gwr jls Lbrkpt2 | already on tmpstk
432 1.1 gwr | Copy frame to the temporary stack
433 1.1 gwr movl sp,a0 | a0=src
434 1.19 jeremy lea _ASM_LABEL(tmpstk)-96,a1 | a1=dst
435 1.1 gwr movl a1,sp | sp=new frame
436 1.1 gwr moveq #FR_SIZE,d1
| Copy FR_SIZE bytes of saved frame, a longword at a time.
437 1.1 gwr Lbrkpt1:
438 1.1 gwr movl a0@+,a1@+
439 1.1 gwr subql #4,d1
440 1.1 gwr bgt Lbrkpt1
441 1.1 gwr
442 1.1 gwr Lbrkpt2:
443 1.11 gwr | Call the trap handler for the kernel debugger.
444 1.6 gwr | Do not call trap() to handle it, so that we can
445 1.1 gwr | set breakpoints in trap() if we want. We know
446 1.1 gwr | the trap type is either T_TRACE or T_BREAKPOINT.
447 1.6 gwr movl d0,sp@- | push trap type
448 1.19 jeremy jbsr _C_LABEL(trap_kdebug)
449 1.6 gwr addql #4,sp | pop args
450 1.6 gwr
451 1.1 gwr | The stack pointer may have been modified, or
452 1.1 gwr | data below it modified (by kgdb push call),
453 1.1 gwr | so push the hardware frame at the current sp
454 1.1 gwr | before restoring registers and returning.
455 1.1 gwr movl sp@(FR_SP),a0 | modified sp
456 1.1 gwr lea sp@(FR_SIZE),a1 | end of our frame
457 1.1 gwr movl a1@-,a0@- | copy 2 longs with
458 1.1 gwr movl a1@-,a0@- | ... predecrement
459 1.1 gwr movl a0,sp@(FR_SP) | sp = h/w frame
460 1.1 gwr moveml sp@+,#0x7FFF | restore all but sp
461 1.1 gwr movl sp@,sp | ... and sp
462 1.1 gwr rte | all done
463 1.1 gwr
464 1.11 gwr /* Use common m68k sigreturn */
465 1.11 gwr #include <m68k/m68k/sigreturn.s>
466 1.1 gwr
467 1.1 gwr /*
468 1.1 gwr  * Interrupt handlers. Most are auto-vectored,
469 1.1 gwr  * and hard-wired the same way on all sun3 models.
470 1.1 gwr  * Format in the stack is:
471 1.1 gwr  * d0,d1,a0,a1, sr, pc, vo
472 1.1 gwr  */
473 1.1 gwr
| Save/restore only the C-volatile registers (d0,d1,a0,a1); the masks
| differ because moveml bit order reverses between predecrement store
| and postincrement load.
474 1.1 gwr #define INTERRUPT_SAVEREG \
475 1.1 gwr moveml #0xC0C0,sp@-
476 1.1 gwr
477 1.1 gwr #define INTERRUPT_RESTORE \
478 1.1 gwr moveml sp@+,#0x0303
479 1.1 gwr
480 1.1 gwr /*
481 1.1 gwr  * This is the common auto-vector interrupt handler,
482 1.1 gwr  * for which the CPU provides the vector=0x18+level.
483 1.1 gwr  * These are installed in the interrupt vector table.
484 1.1 gwr  */
485 1.1 gwr .align 2
486 1.19 jeremy GLOBAL(_isr_autovec)
487 1.1 gwr INTERRUPT_SAVEREG
488 1.19 jeremy jbsr _C_LABEL(isr_autovec)
489 1.1 gwr INTERRUPT_RESTORE
490 1.19 jeremy jra _ASM_LABEL(rei)
491 1.1 gwr
492 1.1 gwr /* clock: see clock.c */
493 1.1 gwr .align 2
494 1.19 jeremy GLOBAL(_isr_clock)
495 1.1 gwr INTERRUPT_SAVEREG
496 1.19 jeremy jbsr _C_LABEL(clock_intr)
497 1.1 gwr INTERRUPT_RESTORE
498 1.19 jeremy jra _ASM_LABEL(rei)
499 1.1 gwr
500 1.1 gwr | Handler for all vectored interrupts (i.e. VME interrupts)
501 1.1 gwr .align 2
502 1.19 jeremy GLOBAL(_isr_vectored)
503 1.1 gwr INTERRUPT_SAVEREG
504 1.19 jeremy jbsr _C_LABEL(isr_vectored)
505 1.1 gwr INTERRUPT_RESTORE
506 1.19 jeremy jra _ASM_LABEL(rei)
507 1.1 gwr
508 1.1 gwr #undef INTERRUPT_SAVEREG
509 1.1 gwr #undef INTERRUPT_RESTORE
510 1.1 gwr
511 1.1 gwr /* interrupt counters (needed by vmstat) */
| Names and counters are parallel arrays indexed by interrupt level 0-7;
| eintrnames/eintrcnt mark the ends for consumers that compute sizes.
512 1.19 jeremy GLOBAL(intrnames)
513 1.1 gwr .asciz "spur" | 0
514 1.1 gwr .asciz "lev1" | 1
515 1.1 gwr .asciz "lev2" | 2
516 1.1 gwr .asciz "lev3" | 3
517 1.1 gwr .asciz "lev4" | 4
518 1.1 gwr .asciz "clock" | 5
519 1.1 gwr .asciz "lev6" | 6
520 1.1 gwr .asciz "nmi" | 7
521 1.19 jeremy GLOBAL(eintrnames)
522 1.1 gwr
523 1.1 gwr .data
524 1.1 gwr .even
525 1.19 jeremy GLOBAL(intrcnt)
526 1.1 gwr .long 0,0,0,0,0,0,0,0,0,0
527 1.19 jeremy GLOBAL(eintrcnt)
528 1.1 gwr .text
529 1.1 gwr
530 1.1 gwr /*
531 1.1 gwr  * Emulation of VAX REI instruction.
532 1.1 gwr  *
533 1.1 gwr  * This code is (mostly) un-altered from the hp300 code,
534 1.1 gwr  * except that sun machines do not need a simulated SIR
535 1.1 gwr  * because they have a real software interrupt register.
536 1.1 gwr  *
537 1.1 gwr  * This code deals with checking for and servicing ASTs
538 1.1 gwr  * (profiling, scheduling) and software interrupts (network, softclock).
539 1.1 gwr  * We check for ASTs first, just like the VAX. To avoid excess overhead
540 1.1 gwr  * the T_ASTFLT handling code will also check for software interrupts so we
541 1.1 gwr  * do not have to do it here. After identifying that we need an AST we
542 1.1 gwr  * drop the IPL to allow device interrupts.
543 1.1 gwr  *
544 1.1 gwr  * This code is complicated by the fact that sendsig may have been called
545 1.1 gwr  * necessitating a stack cleanup.
546 1.1 gwr  */
547 1.1 gwr
548 1.19 jeremy ASGLOBAL(rei)
549 1.1 gwr #ifdef DIAGNOSTIC
550 1.19 jeremy tstl _C_LABEL(panicstr) | have we paniced?
551 1.1 gwr jne Ldorte | yes, do not make matters worse
552 1.1 gwr #endif
553 1.19 jeremy tstl _C_LABEL(astpending) | AST pending?
554 1.1 gwr jeq Ldorte | no, done
555 1.1 gwr Lrei1:
| S bit set in the saved SR means we are returning to kernel mode;
| ASTs are only delivered on the way back to user mode.
556 1.1 gwr btst #5,sp@ | yes, are we returning to user mode?
557 1.1 gwr jne Ldorte | no, done
558 1.1 gwr movw #PSL_LOWIPL,sr | lower SPL
559 1.1 gwr clrl sp@- | stack adjust
560 1.1 gwr moveml #0xFFFF,sp@- | save all registers
561 1.1 gwr movl usp,a1 | including
562 1.1 gwr movl a1,sp@(FR_SP) | the users SP
563 1.1 gwr clrl sp@- | VA == none
564 1.1 gwr clrl sp@- | code == none
565 1.1 gwr movl #T_ASTFLT,sp@- | type == async system trap
566 1.19 jeremy jbsr _C_LABEL(trap) | go handle it
567 1.1 gwr lea sp@(12),sp | pop value args
568 1.1 gwr movl sp@(FR_SP),a0 | restore user SP
569 1.1 gwr movl a0,usp | from save area
570 1.1 gwr movw sp@(FR_ADJ),d0 | need to adjust stack?
571 1.1 gwr jne Laststkadj | yes, go to it
572 1.1 gwr moveml sp@+,#0x7FFF | no, restore most user regs
573 1.1 gwr addql #8,sp | toss SP and stack adjust
574 1.1 gwr rte | and do real RTE
575 1.1 gwr Laststkadj:
| sendsig moved the user frame: slide the 8-byte hardware frame down
| by the adjustment amount before returning.
576 1.1 gwr lea sp@(FR_HW),a1 | pointer to HW frame
577 1.1 gwr addql #8,a1 | source pointer
578 1.1 gwr movl a1,a0 | source
579 1.1 gwr addw d0,a0 | + hole size = dest pointer
580 1.1 gwr movl a1@-,a0@- | copy
581 1.1 gwr movl a1@-,a0@- | 8 bytes
582 1.1 gwr movl a0,sp@(FR_SP) | new SSP
583 1.1 gwr moveml sp@+,#0x7FFF | restore user registers
584 1.1 gwr movl sp@,sp | and our SP
585 1.1 gwr Ldorte:
586 1.1 gwr rte | real return
587 1.1 gwr
588 1.1 gwr /*
589 1.1 gwr * Initialization is at the beginning of this file, because the
590 1.1 gwr * kernel entry point needs to be at zero for compatibility with
591 1.1 gwr * the Sun boot loader. This works on Sun machines because the
592 1.1 gwr * interrupt vector table for reset is NOT at address zero.
593 1.1 gwr * (The MMU has a "boot" bit that forces access to the PROM)
594 1.1 gwr */
595 1.1 gwr
596 1.1 gwr /*
597 1.16 thorpej * Use common m68k sigcode.
598 1.1 gwr */
599 1.16 thorpej #include <m68k/m68k/sigcode.s>
600 1.16 thorpej
601 1.1 gwr .text
602 1.1 gwr
603 1.1 gwr /*
604 1.1 gwr * Primitives
605 1.1 gwr */
606 1.1 gwr
607 1.1 gwr /*
608 1.12 thorpej * Use common m68k support routines.
609 1.1 gwr */
610 1.12 thorpej #include <m68k/m68k/support.s>
611 1.1 gwr
| want_resched: nonzero when a reschedule has been requested (cleared
| by cpu_switch below).
612 1.19 jeremy BSS(want_resched,4)
613 1.1 gwr
614 1.1 gwr /*
615 1.15 thorpej  * Use common m68k process manipulation routines.
616 1.1 gwr  */
617 1.15 thorpej #include <m68k/m68k/proc_subr.s>
618 1.1 gwr
619 1.1 gwr | Message for Lbadsw panic
620 1.1 gwr Lsw0:
621 1.1 gwr .asciz "cpu_switch"
622 1.1 gwr .even
623 1.1 gwr
624 1.1 gwr .data
| curpcb points at the current process's pcb; masterpaddr is an alias
| kept only for old debuggers.
625 1.19 jeremy GLOBAL(masterpaddr) | XXX compatibility (debuggers)
626 1.19 jeremy GLOBAL(curpcb)
627 1.1 gwr .long 0
| nullpcb: throwaway pcb that switch_exit points curpcb at while the
| dying process's real u-area is being freed.
628 1.19 jeremy ASBSS(nullpcb,SIZEOF_PCB)
629 1.1 gwr .text
630 1.1 gwr
631 1.1 gwr /*
632 1.1 gwr  * At exit of a process, do a cpu_switch for the last time.
633 1.28 thorpej  * Switch to a safe stack and PCB, and select a new process to run. The
634 1.28 thorpej  * old stack and u-area will be freed by the reaper.
635 1.42 thorpej  *
636 1.42 thorpej  * MUST BE CALLED AT SPLHIGH!
637 1.1 gwr  */
638 1.1 gwr ENTRY(switch_exit)
639 1.1 gwr movl sp@(4),a0 | struct proc *p
640 1.19 jeremy | save state into garbage pcb
641 1.19 jeremy movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
642 1.19 jeremy lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
643 1.1 gwr
644 1.28 thorpej /* Schedule the vmspace and stack to be freed. */
645 1.28 thorpej movl a0,sp@- | exit2(p)
646 1.28 thorpej jbsr _C_LABEL(exit2)
647 1.42 thorpej lea sp@(4),sp
648 1.28 thorpej
649 1.42 thorpej #if defined(LOCKDEBUG)
650 1.42 thorpej /* Acquire sched_lock */
651 1.42 thorpej jbsr _C_LABEL(sched_lock_idle)
652 1.42 thorpej #endif
653 1.1 gwr
| Tail-jump: cpu_switch never returns here since this process is gone.
654 1.19 jeremy jra _C_LABEL(cpu_switch)
655 1.1 gwr
656 1.1 gwr /*
657 1.1 gwr  * When no processes are on the runq, cpu_switch() branches to idle
658 1.1 gwr  * to wait for something to come ready.
659 1.1 gwr  */
| Lidle: drop the scheduler lock (LOCKDEBUG) and stop at low IPL until
| an interrupt arrives; _Idle re-raises IPL, retakes the lock, and
| rechecks the run queues.
660 1.1 gwr Lidle:
661 1.42 thorpej #if defined(LOCKDEBUG)
662 1.42 thorpej /* Release sched_lock */
663 1.42 thorpej jbsr _C_LABEL(sched_unlock_idle)
664 1.42 thorpej #endif
665 1.1 gwr stop #PSL_LOWIPL
666 1.19 jeremy GLOBAL(_Idle) | See clock.c
667 1.1 gwr movw #PSL_HIGHIPL,sr
668 1.42 thorpej #if defined(LOCKDEBUG)
669 1.42 thorpej /* Acquire sched_lock */
670 1.42 thorpej jbsr _C_LABEL(sched_lock_idle)
671 1.42 thorpej #endif
672 1.42 thorpej movl _C_LABEL(sched_whichqs),%d0
673 1.1 gwr jeq Lidle
674 1.1 gwr jra Lsw1
675 1.1 gwr
| Lbadsw: run-queue inconsistency found by cpu_switch; panic.
676 1.1 gwr Lbadsw:
677 1.1 gwr movl #Lsw0,sp@-
678 1.19 jeremy jbsr _C_LABEL(panic)
679 1.1 gwr /*NOTREACHED*/
680 1.1 gwr
681 1.1 gwr /*
682 1.1 gwr * cpu_switch()
683 1.1 gwr * Hacked for sun3
684 1.1 gwr */
685 1.1 gwr ENTRY(cpu_switch)
686 1.19 jeremy movl _C_LABEL(curpcb),a1 | current pcb
687 1.1 gwr movw sr,a1@(PCB_PS) | save sr before changing ipl
688 1.1 gwr #ifdef notyet
689 1.19 jeremy movl _C_LABEL(curproc),sp@- | remember last proc running
690 1.1 gwr #endif
691 1.19 jeremy clrl _C_LABEL(curproc)
692 1.1 gwr
693 1.1 gwr /*
694 1.1 gwr * Find the highest-priority queue that isn't empty,
695 1.1 gwr * then take the first proc from that queue.
696 1.1 gwr */
697 1.42 thorpej movl _C_LABEL(sched_whichqs),%d0
698 1.43 tsutsui jeq Lidle
699 1.42 thorpej Lsw1:
700 1.42 thorpej /*
701 1.42 thorpej * Interrupts are blocked, sched_lock is held. If
702 1.42 thorpej * we come here via Idle, %d0 contains the contents
703 1.42 thorpej * of a non-zero sched_whichqs.
704 1.42 thorpej */
705 1.42 thorpej movl %d0,%d1
706 1.42 thorpej negl %d0
707 1.42 thorpej andl %d1,%d0
708 1.42 thorpej bfffo %d0{#0:#32},%d1
709 1.42 thorpej eorib #31,%d1
710 1.42 thorpej
711 1.42 thorpej movl %d1,%d0
712 1.42 thorpej lslb #3,%d1 | convert queue number to index
713 1.42 thorpej addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
714 1.42 thorpej movl %d1,%a1
715 1.42 thorpej movl %a1@(P_FORW),%a0 | p = q->p_forw
716 1.42 thorpej cmpal %d1,%a0 | anyone on queue?
717 1.1 gwr jeq Lbadsw | no, panic
718 1.38 thorpej #ifdef DIAGNOSTIC
719 1.38 thorpej tstl a0@(P_WCHAN)
720 1.38 thorpej jne Lbadsw
721 1.38 thorpej cmpb #SRUN,a0@(P_STAT)
722 1.38 thorpej jne Lbadsw
723 1.38 thorpej #endif
724 1.42 thorpej movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
725 1.42 thorpej movl %a0@(P_FORW),%a1 | n = p->p_forw
726 1.42 thorpej movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
727 1.42 thorpej cmpal %d1,%a1 | anyone left on queue?
728 1.42 thorpej jne Lsw2 | yes, skip
729 1.42 thorpej movl _C_LABEL(sched_whichqs),%d1
730 1.42 thorpej bclr %d0,%d1 | no, clear bit
731 1.42 thorpej movl %d1,_C_LABEL(sched_whichqs)
732 1.1 gwr Lsw2:
733 1.41 thorpej /* p->p_cpu initialized in fork1() for single-processor */
734 1.38 thorpej movb #SONPROC,a0@(P_STAT) | p->p_stat = SONPROC
735 1.19 jeremy movl a0,_C_LABEL(curproc)
736 1.19 jeremy clrl _C_LABEL(want_resched)
737 1.1 gwr #ifdef notyet
738 1.1 gwr movl sp@+,a1 | XXX - Make this work!
739 1.1 gwr cmpl a0,a1 | switching to same proc?
740 1.1 gwr jeq Lswdone | yes, skip save and restore
741 1.1 gwr #endif
742 1.1 gwr /*
743 1.1 gwr * Save state of previous process in its pcb.
744 1.1 gwr */
745 1.19 jeremy movl _C_LABEL(curpcb),a1
746 1.1 gwr moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
747 1.1 gwr movl usp,a2 | grab USP (a2 has been saved)
748 1.1 gwr movl a2,a1@(PCB_USP) | and save it
749 1.1 gwr
750 1.19 jeremy tstl _C_LABEL(fputype) | Do we have an fpu?
751 1.1 gwr jeq Lswnofpsave | No? Then don't try save.
752 1.1 gwr lea a1@(PCB_FPCTX),a2 | pointer to FP save area
753 1.1 gwr fsave a2@ | save FP state
754 1.1 gwr tstb a2@ | null state frame?
755 1.1 gwr jeq Lswnofpsave | yes, all done
756 1.1 gwr fmovem fp0-fp7,a2@(FPF_REGS) | save FP general regs
757 1.1 gwr fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control regs
758 1.1 gwr Lswnofpsave:
759 1.1 gwr
760 1.6 gwr /*
761 1.6 gwr * Now that we have saved all the registers that must be
762 1.6 gwr * preserved, we are free to use those registers until
763 1.6 gwr * we load the registers for the switched-to process.
764 1.6 gwr * In this section, keep: a0=curproc, a1=curpcb
765 1.6 gwr */
766 1.6 gwr
767 1.1 gwr clrl a0@(P_BACK) | clear back link
768 1.1 gwr movl a0@(P_ADDR),a1 | get p_addr
769 1.19 jeremy movl a1,_C_LABEL(curpcb)
770 1.42 thorpej
771 1.42 thorpej #if defined(LOCKDEBUG)
772 1.42 thorpej /*
773 1.42 thorpej * Done mucking with the run queues, release the
774 1.42 thorpej * scheduler lock, but keep interrupts out.
775 1.42 thorpej */
776 1.42 thorpej movl %a0,sp@- | not args...
777 1.42 thorpej movl %a1,sp@- | ...just saving
778 1.42 thorpej jbsr _C_LABEL(sched_unlock_idle)
779 1.42 thorpej movl sp@+,%a1
780 1.42 thorpej movl sp@+,%a0
781 1.42 thorpej #endif
782 1.1 gwr
783 1.8 gwr /*
784 1.8 gwr * Load the new VM context (new MMU root pointer)
785 1.8 gwr */
786 1.8 gwr movl a0@(P_VMSPACE),a2 | vm = p->p_vmspace
787 1.8 gwr #ifdef DIAGNOSTIC
788 1.20 gwr tstl a2 | vm == VM_MAP_NULL?
789 1.8 gwr jeq Lbadsw | panic
790 1.8 gwr #endif
791 1.8 gwr #ifdef PMAP_DEBUG
792 1.25 gwr /* When debugging just call _pmap_switch(). */
793 1.25 gwr movl a2@(VM_PMAP),a2 | pmap = vm->vm_map.pmap
794 1.25 gwr pea a2@ | push pmap
795 1.25 gwr jbsr _C_LABEL(_pmap_switch) | _pmap_switch(pmap)
796 1.8 gwr addql #4,sp
797 1.19 jeremy movl _C_LABEL(curpcb),a1 | restore p_addr
798 1.8 gwr #else
799 1.25 gwr /* Otherwise, use this inline version. */
800 1.20 gwr lea _C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
801 1.20 gwr movl a2@(VM_PMAP),a2 | pmap = vm->vm_map.pmap
802 1.8 gwr movl a2@(PM_A_PHYS),d0 | phys = pmap->pm_a_phys
803 1.9 jeremy cmpl a3@(4),d0 | == kernel_crp.rp_addr ?
804 1.8 gwr jeq Lsame_mmuctx | skip loadcrp/flush
805 1.8 gwr /* OK, it is a new MMU context. Load it up. */
806 1.9 jeremy movl d0,a3@(4)
807 1.1 gwr movl #CACHE_CLR,d0
808 1.1 gwr movc d0,cacr | invalidate cache(s)
809 1.1 gwr pflusha | flush entire TLB
810 1.8 gwr pmove a3@,crp | load new user root pointer
811 1.8 gwr Lsame_mmuctx:
812 1.8 gwr #endif
813 1.1 gwr
814 1.6 gwr /*
815 1.6 gwr * Reload the registers for the new process.
816 1.6 gwr * After this point we can only use d0,d1,a0,a1
817 1.6 gwr */
818 1.6 gwr moveml a1@(PCB_REGS),#0xFCFC | reload registers
819 1.1 gwr movl a1@(PCB_USP),a0
820 1.1 gwr movl a0,usp | and USP
821 1.1 gwr
822 1.19 jeremy tstl _C_LABEL(fputype) | If we don't have an fpu,
823 1.1 gwr jeq Lres_skip | don't try to restore it.
824 1.1 gwr lea a1@(PCB_FPCTX),a0 | pointer to FP save area
825 1.1 gwr tstb a0@ | null state frame?
826 1.1 gwr jeq Lresfprest | yes, easy
827 1.1 gwr fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
828 1.1 gwr fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
829 1.1 gwr Lresfprest:
830 1.1 gwr frestore a0@ | restore state
831 1.1 gwr Lres_skip:
832 1.1 gwr movw a1@(PCB_PS),d0 | no, restore PS
833 1.1 gwr #ifdef DIAGNOSTIC
834 1.1 gwr btst #13,d0 | supervisor mode?
835 1.1 gwr jeq Lbadsw | no? panic!
836 1.1 gwr #endif
837 1.1 gwr movw d0,sr | OK, restore PS
838 1.1 gwr moveq #1,d0 | return 1 (for alternate returns)
839 1.1 gwr rts
840 1.1 gwr
841 1.1 gwr /*
842 1.1 gwr * savectx(pcb)
843 1.1 gwr * Update pcb, saving current processor state.
 * Saves the status register, the user stack pointer, and the
 * non-scratch registers (d2-d7/a2-a7, mask 0xFCFC) into the pcb
 * passed as the single stack argument.  If an FPU is configured
 * (fputype != 0) and its state frame is non-null, the FP general
 * and control registers are saved as well.  Always returns 0 in d0.
844 1.1 gwr */
845 1.1 gwr ENTRY(savectx)
846 1.1 gwr movl sp@(4),a1 | a1 = pcb argument
847 1.1 gwr movw sr,a1@(PCB_PS) | save status register
848 1.1 gwr movl usp,a0 | grab USP
849 1.1 gwr movl a0,a1@(PCB_USP) | and save it
850 1.1 gwr moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
851 1.1 gwr
852 1.19 jeremy tstl _C_LABEL(fputype) | Do we have FPU?
853 1.1 gwr jeq Lsavedone | No? Then don't save state.
854 1.1 gwr lea a1@(PCB_FPCTX),a0 | pointer to FP save area
855 1.1 gwr fsave a0@ | save FP state
856 1.1 gwr tstb a0@ | null state frame?
857 1.1 gwr jeq Lsavedone | yes, all done
858 1.1 gwr fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
859 1.1 gwr fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
860 1.1 gwr Lsavedone:
861 1.1 gwr moveq #0,d0 | return 0
862 1.1 gwr rts
863 1.1 gwr
864 1.20 gwr /* suline() */
865 1.1 gwr
866 1.1 gwr #ifdef DEBUG
867 1.1 gwr .data
 | Debug knob: when non-zero, the TBIS/TBIAS/TBIAU routines below
 | jump to _TBIA and flush the entire TLB instead of just the
 | requested entries ("being conservative").
868 1.19 jeremy ASGLOBAL(fulltflush)
869 1.1 gwr .long 0
 | Companion "flush whole cache" flag; not referenced anywhere in
 | this part of the file -- presumably used elsewhere (NOTE(review):
 | confirm it is still needed).
870 1.19 jeremy ASGLOBAL(fullcflush)
871 1.1 gwr .long 0
872 1.1 gwr .text
873 1.1 gwr #endif
874 1.1 gwr
875 1.1 gwr /*
876 1.1 gwr * Invalidate entire TLB.
 * _TBIA is an alternate (local) entry point used by the TBIS/
 * TBIAS/TBIAU routines below when the DEBUG "fulltflush" flag
 * forces a full flush.  Also clears the on-chip data cache.
877 1.1 gwr */
878 1.1 gwr ENTRY(TBIA)
879 1.19 jeremy _C_LABEL(_TBIA):
880 1.1 gwr pflusha | flush all TLB entries
881 1.1 gwr movl #DC_CLEAR,d0
882 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
883 1.1 gwr rts
884 1.1 gwr
885 1.1 gwr /*
886 1.1 gwr * Invalidate any TLB entry for given VA (TB Invalidate Single)
 * The single stack argument is the virtual address whose TLB
 * entries should be purged.  With DEBUG and fulltflush set, this
 * falls back to a full TLB flush via _TBIA instead.
887 1.1 gwr */
888 1.1 gwr ENTRY(TBIS)
889 1.1 gwr #ifdef DEBUG
890 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
891 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush entire TLB
892 1.1 gwr #endif
893 1.1 gwr movl sp@(4),a0 | a0 = VA to invalidate
894 1.1 gwr pflush #0,#0,a0@ | flush address from both sides (FC mask 0 matches user and supervisor)
895 1.1 gwr movl #DC_CLEAR,d0
896 1.1 gwr movc d0,cacr | invalidate on-chip data cache
897 1.1 gwr rts
898 1.1 gwr
899 1.1 gwr /*
900 1.1 gwr * Invalidate supervisor side of TLB
 * With DEBUG and fulltflush set, flushes everything via _TBIA.
901 1.1 gwr */
902 1.1 gwr ENTRY(TBIAS)
903 1.1 gwr #ifdef DEBUG
904 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
905 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
906 1.1 gwr #endif
907 1.1 gwr pflush #4,#4 | flush supervisor TLB entries (FC 4, mask 4)
908 1.1 gwr movl #DC_CLEAR,d0
909 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
910 1.1 gwr rts
911 1.1 gwr
912 1.1 gwr /*
913 1.1 gwr * Invalidate user side of TLB
 * With DEBUG and fulltflush set, flushes everything via _TBIA.
914 1.1 gwr */
915 1.1 gwr ENTRY(TBIAU)
916 1.1 gwr #ifdef DEBUG
917 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
918 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
919 1.1 gwr #endif
920 1.1 gwr pflush #0,#4 | flush user TLB entries (FC 0, mask 4)
921 1.1 gwr movl #DC_CLEAR,d0
922 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
923 1.1 gwr rts
924 1.1 gwr
925 1.1 gwr /*
926 1.1 gwr * Invalidate instruction cache
 * Writes IC_CLEAR to the CACR; does not touch the data cache.
927 1.1 gwr */
928 1.1 gwr ENTRY(ICIA)
929 1.1 gwr movl #IC_CLEAR,d0
930 1.1 gwr movc d0,cacr | invalidate i-cache
931 1.1 gwr rts
932 1.1 gwr
933 1.1 gwr /*
934 1.1 gwr * Invalidate data cache.
935 1.1 gwr * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
936 1.1 gwr * problems with DC_WA. The only cases we have to worry about are context
937 1.1 gwr * switch and TLB changes, both of which are handled "in-line" in resume
938 1.1 gwr * and TBI*.
939 1.1 gwr */
940 1.1 gwr ENTRY(DCIA)
941 1.1 gwr __DCIA:
942 1.1 gwr rts | deliberate no-op (see NOTE above)
943 1.1 gwr
 | Invalidate supervisor-side data cache: a deliberate no-op here,
 | for the same reasons given in the comment above ENTRY(DCIA).
944 1.1 gwr ENTRY(DCIS)
945 1.1 gwr __DCIS:
946 1.1 gwr rts
947 1.1 gwr
948 1.1 gwr /*
949 1.1 gwr * Invalidate data cache.
 * Unlike DCIA/DCIS above, this one really does clear the on-chip
 * data cache (via DC_CLEAR in the CACR).
950 1.1 gwr */
951 1.1 gwr ENTRY(DCIU)
952 1.11 gwr movl #DC_CLEAR,d0
953 1.11 gwr movc d0,cacr | invalidate on-chip d-cache
954 1.1 gwr rts
955 1.1 gwr
956 1.1 gwr /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
957 1.1 gwr
 | Purge (invalidate) physical cache lines: implemented here simply
 | by clearing the on-chip data cache.
958 1.1 gwr ENTRY(PCIA)
959 1.1 gwr movl #DC_CLEAR,d0
960 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
961 1.1 gwr rts
962 1.1 gwr
 | Enable external cache: nothing to do here (no-op).
963 1.1 gwr ENTRY(ecacheon)
964 1.1 gwr rts
965 1.1 gwr
 | Disable external cache: nothing to do here (no-op).
966 1.1 gwr ENTRY(ecacheoff)
967 1.1 gwr rts
968 1.1 gwr
969 1.1 gwr /*
970 1.1 gwr * Get callers current SP value.
971 1.1 gwr * Note that simply taking the address of a local variable in a C function
972 1.1 gwr * doesn't work because callee saved registers may be outside the stack frame
973 1.1 gwr * defined by A6 (e.g. GCC generated code).
974 1.20 gwr *
 * Returns (in d0) the caller's SP as it was just before the call.
975 1.1 gwr * [I don't think the ENTRY() macro will do the right thing with this -- glass]
976 1.1 gwr */
977 1.19 jeremy GLOBAL(getsp)
978 1.1 gwr movl sp,d0 | get current SP
979 1.1 gwr addql #4,d0 | compensate for return address
980 1.1 gwr rts
981 1.1 gwr
 | Return the source function code register (SFC) in d0.
982 1.1 gwr ENTRY(getsfc)
983 1.1 gwr movc sfc,d0
984 1.1 gwr rts
985 1.1 gwr
 | Return the destination function code register (DFC) in d0.
986 1.1 gwr ENTRY(getdfc)
987 1.1 gwr movc dfc,d0
988 1.1 gwr rts
989 1.1 gwr
 | Return the vector base register (VBR) in d0.
990 1.1 gwr ENTRY(getvbr)
991 1.1 gwr movc vbr, d0
992 1.1 gwr rts
993 1.1 gwr
 | Load the vector base register (VBR) from the first stack argument.
994 1.1 gwr ENTRY(setvbr)
995 1.1 gwr movl sp@(4), d0
996 1.1 gwr movc d0, vbr
997 1.1 gwr rts
998 1.1 gwr
999 1.1 gwr /*
1000 1.1 gwr * Load a new CPU Root Pointer (CRP) into the MMU.
1001 1.2 gwr * void loadcrp(struct mmu_rootptr *);
 * Invalidates the on-chip caches and flushes the whole TLB before
 * installing the new root pointer, since all old translations
 * become stale.
1002 1.1 gwr */
1003 1.1 gwr ENTRY(loadcrp)
1004 1.1 gwr movl sp@(4),a0 | arg1: &CRP
1005 1.1 gwr movl #CACHE_CLR,d0
1006 1.1 gwr movc d0,cacr | invalidate cache(s)
1007 1.1 gwr pflusha | flush entire TLB
1008 1.1 gwr pmove a0@,crp | load new user root pointer
1009 1.10 gwr rts
1010 1.10 gwr
1011 1.10 gwr /*
1012 1.10 gwr * Get the physical address of the PTE for a given VA.
 * Uses ptestr with function code 5 (supervisor data space) and a
 * level-7 search; the MMU deposits the address of the last page
 * descriptor examined in a1, which is returned in d0.
1013 1.10 gwr */
1014 1.10 gwr ENTRY(ptest_addr)
1015 1.10 gwr movl sp@(4),a0 | VA
1016 1.10 gwr ptestr #5,a0@,#7,a1 | a1 = addr of PTE
1017 1.10 gwr movl a1,d0 | return PTE address in d0
1018 1.1 gwr rts
1019 1.1 gwr
1020 1.1 gwr /*
1021 1.1 gwr * Set processor priority level calls. Most are implemented with
1022 1.1 gwr * inline asm expansions. However, we need one instantiation here
1023 1.1 gwr * in case some non-optimized code makes external references.
1024 1.21 gwr * Most places will use the inlined functions param.h supplies.
1025 1.1 gwr */
1026 1.1 gwr
 | Return the current status register (zero-extended) in d0.
1027 1.21 gwr ENTRY(_getsr)
1028 1.21 gwr clrl d0
1029 1.21 gwr movw sr,d0
1030 1.21 gwr rts
1031 1.21 gwr
 | Unconditionally install a new SR value (first stack argument)
 | and return the previous SR (zero-extended) in d0.
1032 1.1 gwr ENTRY(_spl)
1033 1.1 gwr clrl d0
1034 1.1 gwr movw sr,d0 | d0 = old SR
1035 1.21 gwr movl sp@(4),d1
1036 1.1 gwr movw d1,sr | install new SR
1037 1.1 gwr rts
1038 1.1 gwr
 | Raise the interrupt priority level: install the new SR value
 | (first stack argument) only if the current masked IPL is below
 | it; never lowers.  Returns the previous SR in d0.
1039 1.21 gwr ENTRY(_splraise)
1040 1.21 gwr clrl d0
1041 1.21 gwr movw sr,d0 | d0 = old SR (return value)
1042 1.21 gwr movl d0,d1
1043 1.21 gwr andl #PSL_HIGHIPL,d1 | old &= PSL_HIGHIPL
1044 1.21 gwr cmpl sp@(4),d1 | (old - new)
1045 1.21 gwr bge Lsplr | already at or above new level: done
1046 1.21 gwr movl sp@(4),d1
1047 1.21 gwr movw d1,sr | raise to the requested level
1048 1.21 gwr Lsplr:
1049 1.1 gwr rts
1050 1.1 gwr
1051 1.1 gwr /*
1052 1.1 gwr * Save and restore 68881 state.
 *
 * m68881_save(fpframe): fsave the FP state into the given save
 * area; if the resulting state frame is non-null (FPU not idle),
 * also save the FP general and control registers.
1053 1.1 gwr */
1054 1.1 gwr ENTRY(m68881_save)
1055 1.1 gwr movl sp@(4),a0 | save area pointer
1056 1.1 gwr fsave a0@ | save state
1057 1.1 gwr tstb a0@ | null state frame?
1058 1.1 gwr jeq Lm68881sdone | yes, all done
1059 1.1 gwr fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1060 1.1 gwr fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1061 1.1 gwr Lm68881sdone:
1062 1.1 gwr rts
1063 1.1 gwr
 | m68881_restore(fpframe): inverse of m68881_save above.  If the
 | saved state frame is non-null, reload the FP control and general
 | registers first, then frestore the frame itself.
1064 1.1 gwr ENTRY(m68881_restore)
1065 1.1 gwr movl sp@(4),a0 | save area pointer
1066 1.1 gwr tstb a0@ | null state frame?
1067 1.1 gwr jeq Lm68881rdone | yes, easy
1068 1.1 gwr fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1069 1.1 gwr fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1070 1.1 gwr Lm68881rdone:
1071 1.1 gwr frestore a0@ | restore state
1072 1.1 gwr rts
1073 1.1 gwr
1074 1.1 gwr /*
1075 1.1 gwr * _delay(unsigned N)
1076 1.1 gwr * Delay for at least (N/256) microseconds.
1077 1.1 gwr * This routine depends on the variable: delay_divisor
1078 1.1 gwr * which should be set based on the CPU clock rate.
1079 1.26 gwr * XXX: Currently this is set based on the CPU model,
1080 1.26 gwr * XXX: but this should be determined at run time...
1081 1.1 gwr */
1082 1.19 jeremy GLOBAL(_delay)
1083 1.1 gwr | d0 = arg = (usecs << 8)
1084 1.1 gwr movl sp@(4),d0
1085 1.1 gwr | d1 = delay_divisor;
1086 1.19 jeremy movl _C_LABEL(delay_divisor),d1
1087 1.36 thorpej jra L_delay /* Jump into the loop! */
1088 1.36 thorpej
1089 1.36 thorpej /*
1090 1.36 thorpej * Align the branch target of the loop to a half-line (8-byte)
1091 1.36 thorpej * boundary to minimize cache effects. This guarantees both
1092 1.36 thorpej * that there will be no prefetch stalls due to cache line burst
1093 1.36 thorpej * operations and that the loop will run from a single cache
1094 1.36 thorpej * half-line.
1095 1.36 thorpej */
1096 1.36 thorpej .align 8
1097 1.1 gwr L_delay:
1098 1.1 gwr subl d1,d0 | d0 -= delay_divisor
1099 1.1 gwr jgt L_delay | spin until d0 <= 0
1100 1.1 gwr rts
1101 1.1 gwr
1102 1.1 gwr
1103 1.1 gwr | Define some addresses, mostly so DDB can print useful info.
1104 1.24 gwr | Not using _C_LABEL() here because these symbols are never
1105 1.24 gwr | referenced by any C code, and if the leading underscore
1106 1.24 gwr | ever goes away, these lines turn into syntax errors...
 | The values come from KERNBASE and the SUN3X_* layout constants
 | supplied by the included headers.
1107 1.24 gwr .set _KERNBASE,KERNBASE
1108 1.26 gwr .set _MONSTART,SUN3X_MONSTART
1109 1.26 gwr .set _PROM_BASE,SUN3X_PROM_BASE
1110 1.26 gwr .set _MONEND,SUN3X_MONEND
1111 1.1 gwr
1112 1.1 gwr |The end!
1113