1 1.45 chs /* $NetBSD: locore.s,v 1.45 2001/02/22 07:11:12 chs Exp $ */
2 1.1 gwr
3 1.1 gwr /*
4 1.1 gwr * Copyright (c) 1988 University of Utah.
5 1.1 gwr * Copyright (c) 1980, 1990, 1993
6 1.1 gwr * The Regents of the University of California. All rights reserved.
7 1.1 gwr *
8 1.1 gwr * This code is derived from software contributed to Berkeley by
9 1.1 gwr * the Systems Programming Group of the University of Utah Computer
10 1.1 gwr * Science Department.
11 1.1 gwr *
12 1.1 gwr * Redistribution and use in source and binary forms, with or without
13 1.1 gwr * modification, are permitted provided that the following conditions
14 1.1 gwr * are met:
15 1.1 gwr * 1. Redistributions of source code must retain the above copyright
16 1.1 gwr * notice, this list of conditions and the following disclaimer.
17 1.1 gwr * 2. Redistributions in binary form must reproduce the above copyright
18 1.1 gwr * notice, this list of conditions and the following disclaimer in the
19 1.1 gwr * documentation and/or other materials provided with the distribution.
20 1.1 gwr * 3. All advertising materials mentioning features or use of this software
21 1.1 gwr * must display the following acknowledgement:
22 1.1 gwr * This product includes software developed by the University of
23 1.1 gwr * California, Berkeley and its contributors.
24 1.1 gwr * 4. Neither the name of the University nor the names of its contributors
25 1.1 gwr * may be used to endorse or promote products derived from this software
26 1.1 gwr * without specific prior written permission.
27 1.1 gwr *
28 1.1 gwr * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 1.1 gwr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 1.1 gwr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 1.1 gwr * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 1.1 gwr * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 1.1 gwr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 1.1 gwr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 1.1 gwr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 1.1 gwr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 1.1 gwr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 1.1 gwr * SUCH DAMAGE.
39 1.1 gwr *
40 1.1 gwr * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 1.1 gwr * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 1.1 gwr */
43 1.1 gwr
44 1.29 thorpej #include "opt_compat_netbsd.h"
45 1.34 kleink #include "opt_compat_svr4.h"
46 1.35 christos #include "opt_compat_sunos.h"
47 1.42 thorpej #include "opt_lockdebug.h"
48 1.27 gwr
49 1.1 gwr #include "assym.h"
50 1.17 thorpej #include <machine/asm.h>
51 1.1 gwr #include <machine/trap.h>
52 1.1 gwr
53 1.1 gwr | Remember this is a fun project!
54 1.1 gwr
55 1.1 gwr .data
56 1.19 jeremy GLOBAL(mon_crp)
57 1.1 gwr .long 0,0
58 1.1 gwr
59 1.1 gwr | This is for kvm_mkdb, and should be the address of the beginning
60 1.1 gwr | of the kernel text segment (not necessarily the same as kernbase).
61 1.1 gwr .text
62 1.19 jeremy GLOBAL(kernel_text)
63 1.1 gwr
64 1.1 gwr | This is the entry point, as well as the end of the temporary stack
65 1.1 gwr | used during process switch (one 8K page ending at start)
66 1.19 jeremy ASGLOBAL(tmpstk)
67 1.20 gwr ASGLOBAL(start)
68 1.19 jeremy
69 1.1 gwr | The first step, after disabling interrupts, is to map enough of the kernel
70 1.1 gwr | into high virtual address space so that we can use position dependent code.
71 1.1 gwr | This is a tricky task on the sun3x because the MMU is already enabled and
72 1.1 gwr | the ROM monitor provides no indication of where the root MMU table is mapped.
73 1.1 gwr | Therefore we must use one of the 68030's 'transparent translation' registers
74 1.1 gwr | to define a range in the address space where the MMU translation is
75 1.1 gwr | turned off. Once this is complete we can modify the MMU table directly
76 1.1 gwr | without the need for it to be mapped into virtual memory.
77 1.1 gwr | All code must be position independent until otherwise noted, as the
78 1.1 gwr | boot loader has loaded us into low memory but all the symbols in this
79 1.1 gwr | code have been linked high.
80 1.45 chs movw #PSL_HIGHIPL,%sr | no interrupts
81 1.45 chs movl #KERNBASE,%a5 | for vtop conversion
82 1.45 chs lea _C_LABEL(mon_crp),%a0 | where to store the CRP
83 1.45 chs subl %a5,%a0
84 1.1 gwr | Note: borrowing mon_crp for tt0 setup...
85 1.45 chs movl #0x3F8107,%a0@ | map the low 1GB v=p with the
86 1.14 jeremy .long 0xf0100800 | transparent translation reg0
87 1.14 jeremy | [ pmove a0@, tt0 ]
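| (A rough decode of that 0x3F8107 value, assuming the standard 68030
| TTx register layout: address base 0x00, address mask 0x3F -- so the
| register matches VA 0x00000000-0x3FFFFFFF, i.e. the low 1GB -- with
| the E (enable) and RWM bits set and an FC mask of 7, so both reads
| and writes in any function code space pass through untranslated.)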
88 1.1 gwr | In order to map the kernel into high memory we will copy the root table
89 1.1 gwr | entry which maps the 16 megabytes of memory starting at 0x0 into the
90 1.1 gwr | entry which maps the 16 megabytes starting at KERNBASE.
91 1.45 chs pmove %crp,%a0@ | Get monitor CPU root pointer
92 1.45 chs movl %a0@(4),%a1 | 2nd word is PA of level A table
93 1.1 gwr
94 1.45 chs movl %a1,%a0 | compute the descriptor address
95 1.45 chs addl #0x3e0,%a1 | for VA starting at KERNBASE
96 1.45 chs movl %a0@,%a1@ | copy descriptor type
97 1.45 chs movl %a0@(4),%a1@(4) | copy physical address
98 1.1 gwr
99 1.1 gwr | Kernel is now double mapped at zero and KERNBASE.
100 1.1 gwr | Force a long jump to the relocated code (high VA).
101 1.45 chs movl #IC_CLEAR,%d0 | Flush the I-cache
102 1.45 chs movc %d0,%cacr
103 1.1 gwr jmp L_high_code:l | long jump
104 1.1 gwr
105 1.1 gwr L_high_code:
106 1.1 gwr | We are now running in the correctly relocated kernel, so
107 1.1 gwr | we are no longer restricted to position-independent code.
108 1.1 gwr | It is handy to leave transparent translation enabled
109 1.20 gwr | for the low 1GB while _bootstrap() is doing its thing.
110 1.1 gwr
111 1.1 gwr | Do bootstrap stuff needed before main() gets called.
112 1.1 gwr | Our boot loader leaves a copy of the kernel's exec header
113 1.1 gwr | just before the start of the kernel text segment, so the
114 1.1 gwr | kernel can sanity-check the DDB symbols at [end...esym].
115 1.20 gwr | Pass the struct exec at tmpstk-32 to _bootstrap().
116 1.7 gwr | Also, make sure the initial frame pointer is zero so that
117 1.7 gwr | the backtrace algorithm used by KGDB terminates nicely.
118 1.45 chs lea _ASM_LABEL(tmpstk)-32,%sp
119 1.45 chs movl #0,%a6
120 1.26 gwr jsr _C_LABEL(_bootstrap) | See locore2.c
121 1.1 gwr
122 1.1 gwr | Now turn off the transparent translation of the low 1GB.
123 1.1 gwr | (this also flushes the ATC)
124 1.45 chs clrl %sp@-
125 1.14 jeremy .long 0xf0170800 | pmove sp@,tt0
126 1.45 chs addql #4,%sp
127 1.1 gwr
128 1.20 gwr | Now that _bootstrap() is done using the PROM functions,
129 1.1 gwr | we can safely set the sfc/dfc to something != FC_CONTROL
130 1.45 chs moveq #FC_USERD,%d0 | make movs access "user data"
131 1.45 chs movc %d0,%sfc | space for copyin/copyout
132 1.45 chs movc %d0,%dfc
133 1.1 gwr
134 1.1 gwr | Setup process zero user/kernel stacks.
135 1.45 chs movl _C_LABEL(proc0paddr),%a1| get proc0 pcb addr
136 1.45 chs lea %a1@(USPACE-4),%sp | set SSP to last word
137 1.45 chs movl #USRSTACK-4,%a2
138 1.45 chs movl %a2,%usp | init user SP
139 1.1 gwr
140 1.20 gwr | Note curpcb was already set in _bootstrap().
141 1.1 gwr | Will do fpu initialization during autoconfig (see fpu.c)
142 1.1 gwr | The interrupt vector table and stack are now ready.
143 1.1 gwr | Interrupts will be enabled later, AFTER autoconfiguration
144 1.1 gwr | is finished, to avoid spurious interrupts.
145 1.1 gwr
146 1.1 gwr /*
147 1.1 gwr * Final preparation for calling main.
148 1.1 gwr *
149 1.1 gwr * Create a fake exception frame that returns to user mode,
150 1.1 gwr * and save its address in p->p_md.md_regs for cpu_fork().
151 1.1 gwr * The new frames for process 1 and 2 will be adjusted by
152 1.1 gwr * cpu_set_kpc() to arrange for a call to a kernel function
153 1.1 gwr * before the new process does its rte out to user mode.
154 1.1 gwr */
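/*
 * A rough picture of the frame the pushes below build, relative to the
 * final %sp (field names as in the m68k struct frame; offsets are an
 * illustrative sketch only):
 *
 *	%sp+74: tf_format/tf_vector	(the clrw)
 *	%sp+70: tf_pc			(the clrl, filled in later)
 *	%sp+68: tf_sr = PSL_USER	(the movw)
 *	%sp+64: tf_pad/tf_stackadj	(the second clrl)
 *	%sp+0:  tf_regs[16]		(64 bytes, the lea -64)
 */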
155 1.45 chs clrw %sp@- | tf_format,tf_vector
156 1.45 chs clrl %sp@- | tf_pc (filled in later)
157 1.45 chs movw #PSL_USER,%sp@- | tf_sr for user mode
158 1.45 chs clrl %sp@- | tf_stackadj
159 1.45 chs lea %sp@(-64),%sp | tf_regs[16]
160 1.45 chs movl %sp,%a1 | a1=trapframe
161 1.45 chs lea _C_LABEL(proc0),%a0 | proc0.p_md.md_regs =
162 1.45 chs movl %a1,%a0@(P_MDREGS) | trapframe
163 1.45 chs movl %a2,%a1@(FR_SP) | a2 == usp (from above)
164 1.45 chs pea %a1@ | push &trapframe
165 1.19 jeremy jbsr _C_LABEL(main) | main(&trapframe)
166 1.45 chs addql #4,%sp | help DDB backtrace
167 1.1 gwr trap #15 | should not get here
168 1.1 gwr
169 1.1 gwr | This is used by cpu_fork() to return to user mode.
170 1.1 gwr | It is called with SP pointing to a struct trapframe.
171 1.19 jeremy GLOBAL(proc_do_uret)
172 1.45 chs movl %sp@(FR_SP),%a0 | grab and load
173 1.45 chs movl %a0,%usp | user SP
174 1.45 chs moveml %sp@+,#0x7FFF | load most registers (all but SSP)
175 1.45 chs addql #8,%sp | pop SSP and stack adjust count
176 1.1 gwr rte
177 1.1 gwr
178 1.1 gwr /*
179 1.1 gwr * proc_trampoline:
180 1.1 gwr * This is used by cpu_set_kpc() to "push" a function call onto the
181 1.1 gwr * kernel stack of some process, very much like a signal delivery.
182 1.1 gwr * When we get here, the stack has:
183 1.1 gwr *
184 1.1 gwr * SP+8: switchframe from before cpu_set_kpc
185 1.31 thorpej * SP+4: void *arg;
186 1.1 gwr * SP: u_long func;
187 1.1 gwr *
188 1.1 gwr * On entry, the switchframe pushed by cpu_set_kpc has already been
189 1.1 gwr * popped off the stack, so all this needs to do is pop the function
190 1.1 gwr * pointer into a register, call it, then pop the arg, and finally
191 1.1 gwr * return using the switchframe that remains on the stack.
192 1.1 gwr */
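/*
 * In C-like terms, the trampoline below amounts to (illustrative
 * sketch only):
 *
 *	(*func)(arg);
 *	return;		-- the rts unwinds through the switchframe,
 *			   as if cpu_switch() had just returned.
 */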
193 1.19 jeremy GLOBAL(proc_trampoline)
194 1.45 chs movl %sp@+,%a0 | function pointer
195 1.45 chs jbsr %a0@ | (*func)(arg)
196 1.45 chs addql #4,%sp | toss the arg
197 1.1 gwr rts | as cpu_switch would do
198 1.1 gwr
199 1.1 gwr | That is all the assembly startup code we need on the sun3x!
200 1.1 gwr | The rest of this is like the hp300/locore.s where possible.
201 1.1 gwr
202 1.1 gwr /*
203 1.1 gwr * Trap/interrupt vector routines
204 1.1 gwr */
205 1.17 thorpej #include <m68k/m68k/trap_subr.s>
206 1.1 gwr
207 1.19 jeremy GLOBAL(buserr)
208 1.19 jeremy tstl _C_LABEL(nofault) | device probe?
209 1.19 jeremy jeq _C_LABEL(addrerr) | no, handle as usual
210 1.45 chs movl _C_LABEL(nofault),%sp@- | yes,
211 1.19 jeremy jbsr _C_LABEL(longjmp) | longjmp(nofault)
212 1.19 jeremy GLOBAL(addrerr)
213 1.45 chs clrl %sp@- | stack adjust count
214 1.45 chs moveml #0xFFFF,%sp@- | save user registers
215 1.45 chs movl %usp,%a0 | save the user SP
216 1.45 chs movl %a0,%sp@(FR_SP) | in the savearea
217 1.45 chs lea %sp@(FR_HW),%a1 | grab base of HW berr frame
218 1.45 chs moveq #0,%d0
219 1.45 chs movw %a1@(10),%d0 | grab SSW for fault processing
220 1.45 chs btst #12,%d0 | RB set?
221 1.1 gwr jeq LbeX0 | no, test RC
222 1.45 chs bset #14,%d0 | yes, must set FB
223 1.45 chs movw %d0,%a1@(10) | for hardware too
224 1.1 gwr LbeX0:
225 1.45 chs btst #13,%d0 | RC set?
226 1.1 gwr jeq LbeX1 | no, skip
227 1.45 chs bset #15,%d0 | yes, must set FC
228 1.45 chs movw %d0,%a1@(10) | for hardware too
229 1.1 gwr LbeX1:
230 1.45 chs btst #8,%d0 | data fault?
231 1.1 gwr jeq Lbe0 | no, check for hard cases
232 1.45 chs movl %a1@(16),%d1 | fault address is as given in frame
233 1.1 gwr jra Lbe10 | that's it
234 1.1 gwr Lbe0:
235 1.45 chs btst #4,%a1@(6) | long (type B) stack frame?
236 1.1 gwr jne Lbe4 | yes, go handle
237 1.45 chs movl %a1@(2),%d1 | no, can use saved PC
238 1.45 chs btst #14,%d0 | FB set?
239 1.1 gwr jeq Lbe3 | no, try FC
240 1.45 chs addql #4,%d1 | yes, adjust address
241 1.1 gwr jra Lbe10 | done
242 1.1 gwr Lbe3:
243 1.45 chs btst #15,%d0 | FC set?
244 1.1 gwr jeq Lbe10 | no, done
245 1.45 chs addql #2,%d1 | yes, adjust address
246 1.1 gwr jra Lbe10 | done
247 1.1 gwr Lbe4:
248 1.45 chs movl %a1@(36),%d1 | long format, use stage B address
249 1.45 chs btst #15,%d0 | FC set?
250 1.1 gwr jeq Lbe10 | no, all done
251 1.45 chs subql #2,%d1 | yes, adjust address
252 1.1 gwr Lbe10:
253 1.45 chs movl %d1,%sp@- | push fault VA
254 1.45 chs movl %d0,%sp@- | and padded SSW
255 1.45 chs movw %a1@(6),%d0 | get frame format/vector offset
256 1.45 chs andw #0x0FFF,%d0 | clear out frame format
257 1.45 chs cmpw #12,%d0 | address error vector?
258 1.1 gwr jeq Lisaerr | yes, go to it
259 1.1 gwr
260 1.1 gwr /* MMU-specific code to determine reason for bus error. */
261 1.45 chs movl %d1,%a0 | fault address
262 1.45 chs movl %sp@,%d0 | function code from ssw
263 1.45 chs btst #8,%d0 | data fault?
264 1.1 gwr jne Lbe10a
265 1.45 chs movql #1,%d0 | user program access FC
266 1.1 gwr | (we don't separate data/program)
267 1.45 chs btst #5,%a1@ | supervisor mode?
268 1.1 gwr jeq Lbe10a | if no, done
269 1.45 chs movql #5,%d0 | else supervisor program access
270 1.1 gwr Lbe10a:
271 1.45 chs ptestr %d0,%a0@,#7 | do a table search
272 1.45 chs pmove %psr,%sp@ | save result
273 1.45 chs movb %sp@,%d1
274 1.45 chs btst #2,%d1 | invalid? (incl. limit viol and berr)
275 1.1 gwr jeq Lmightnotbemerr | no -> wp check
276 1.45 chs btst #7,%d1 | is it MMU table berr?
277 1.1 gwr jeq Lismerr | no, must be fast
278 1.1 gwr jra Lisberr1 | real bus err needs not be fast
279 1.1 gwr Lmightnotbemerr:
280 1.45 chs btst #3,%d1 | write protect bit set?
281 1.1 gwr jeq Lisberr1 | no, must be bus error
282 1.45 chs movl %sp@,%d0 | ssw into low word of d0
283 1.45 chs andw #0xc0,%d0 | write protect is set on page:
284 1.45 chs cmpw #0x40,%d0 | was it read cycle?
285 1.1 gwr jeq Lisberr1 | yes, was not WPE, must be bus err
286 1.1 gwr /* End of MMU-specific bus error code. */
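/*
 * The decision above, restated as a rough C sketch (the bit names are
 * taken from the inline comments, not from any header):
 *
 *	if (psr & INVALID)
 *		type = (psr & TABLE_BERR) ? T_BUSERR : T_MMUFLT;
 *	else if ((psr & WP) && the SSW says it was a write cycle)
 *		type = T_MMUFLT;
 *	else
 *		type = T_BUSERR;
 */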
287 1.1 gwr
288 1.1 gwr Lismerr:
289 1.45 chs movl #T_MMUFLT,%sp@- | show that we are an MMU fault
290 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
291 1.1 gwr Lisaerr:
292 1.45 chs movl #T_ADDRERR,%sp@- | mark address error
293 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
294 1.1 gwr Lisberr1:
295 1.45 chs clrw %sp@ | re-clear pad word
296 1.1 gwr Lisberr:
297 1.45 chs movl #T_BUSERR,%sp@- | mark bus error
298 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
299 1.1 gwr
300 1.1 gwr /*
301 1.1 gwr * FP exceptions.
302 1.1 gwr */
303 1.19 jeremy GLOBAL(fpfline)
304 1.45 chs clrl %sp@- | stack adjust count
305 1.45 chs moveml #0xFFFF,%sp@- | save registers
306 1.45 chs moveq #T_FPEMULI,%d0 | denote as FP emulation trap
307 1.19 jeremy jra _ASM_LABEL(fault) | do it
308 1.1 gwr
309 1.19 jeremy GLOBAL(fpunsupp)
310 1.45 chs clrl %sp@- | stack adjust count
311 1.45 chs moveml #0xFFFF,%sp@- | save registers
312 1.45 chs moveq #T_FPEMULD,%d0 | denote as FP emulation trap
313 1.19 jeremy jra _ASM_LABEL(fault) | do it
314 1.1 gwr
315 1.1 gwr /*
316 1.1 gwr * Handles all other FP coprocessor exceptions.
317 1.1 gwr * Note that since some FP exceptions generate mid-instruction frames
318 1.1 gwr * and may cause signal delivery, we need to test for stack adjustment
319 1.1 gwr * after the trap call.
320 1.1 gwr */
321 1.19 jeremy GLOBAL(fpfault)
322 1.45 chs clrl %sp@- | stack adjust count
323 1.45 chs moveml #0xFFFF,%sp@- | save user registers
324 1.45 chs movl %usp,%a0 | and save
325 1.45 chs movl %a0,%sp@(FR_SP) | the user stack pointer
326 1.45 chs clrl %sp@- | no VA arg
327 1.45 chs movl _C_LABEL(curpcb),%a0 | current pcb
328 1.45 chs lea %a0@(PCB_FPCTX),%a0 | address of FP savearea
329 1.45 chs fsave %a0@ | save state
330 1.45 chs tstb %a0@ | null state frame?
331 1.1 gwr jeq Lfptnull | yes, safe
332 1.45 chs clrw %d0 | no, need to tweak BIU
333 1.45 chs movb %a0@(1),%d0 | get frame size
334 1.45 chs bset #3,%a0@(0,%d0:w) | set exc_pend bit of BIU
335 1.1 gwr Lfptnull:
336 1.45 chs fmovem %fpsr,%sp@- | push fpsr as code argument
337 1.45 chs frestore %a0@ | restore state
338 1.45 chs movl #T_FPERR,%sp@- | push type arg
339 1.17 thorpej jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
340 1.1 gwr
341 1.1 gwr /*
342 1.1 gwr * Other exceptions only cause four- and six-word stack frames and require
343 1.1 gwr * no post-trap stack adjustment.
344 1.1 gwr */
345 1.19 jeremy GLOBAL(badtrap)
346 1.45 chs clrl %sp@- | stack adjust count
347 1.45 chs moveml #0xFFFF,%sp@- | save std frame regs
348 1.19 jeremy jbsr _C_LABEL(straytrap) | report
349 1.45 chs moveml %sp@+,#0xFFFF | restore regs
350 1.45 chs addql #4,%sp | stack adjust count
351 1.19 jeremy jra _ASM_LABEL(rei) | all done
352 1.1 gwr
353 1.1 gwr /*
354 1.1 gwr * Trap 0 is for system calls
355 1.1 gwr */
356 1.19 jeremy GLOBAL(trap0)
357 1.45 chs clrl %sp@- | stack adjust count
358 1.45 chs moveml #0xFFFF,%sp@- | save user registers
359 1.45 chs movl %usp,%a0 | save the user SP
360 1.45 chs movl %a0,%sp@(FR_SP) | in the savearea
361 1.45 chs movl %d0,%sp@- | push syscall number
362 1.19 jeremy jbsr _C_LABEL(syscall) | handle it
363 1.45 chs addql #4,%sp | pop syscall arg
364 1.45 chs movl %sp@(FR_SP),%a0 | grab and restore
365 1.45 chs movl %a0,%usp | user SP
366 1.45 chs moveml %sp@+,#0x7FFF | restore most registers
367 1.45 chs addql #8,%sp | pop SP and stack adjust
368 1.19 jeremy jra _ASM_LABEL(rei) | all done
369 1.11 gwr
370 1.11 gwr /*
371 1.11 gwr * Trap 12 is the entry point for the cachectl "syscall"
372 1.11 gwr * cachectl(command, addr, length)
373 1.11 gwr * command in d0, addr in a1, length in d1
374 1.11 gwr */
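| A hypothetical user-side stub would look roughly like:
|	movl	#cmd,%d0
|	movl	#addr,%a1
|	movl	#len,%d1
|	trap	#12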
375 1.19 jeremy GLOBAL(trap12)
376 1.45 chs movl _C_LABEL(curproc),%sp@- | push curproc pointer
377 1.45 chs movl %d1,%sp@- | push length
378 1.45 chs movl %a1,%sp@- | push addr
379 1.45 chs movl %d0,%sp@- | push command
380 1.32 is jbsr _C_LABEL(cachectl1) | do it
381 1.45 chs lea %sp@(16),%sp | pop args
382 1.19 jeremy jra _ASM_LABEL(rei) | all done
383 1.1 gwr
384 1.1 gwr /*
385 1.1 gwr * Trace (single-step) trap. Kernel-mode is special.
386 1.1 gwr * User mode traps are simply passed on to trap().
387 1.1 gwr */
388 1.19 jeremy GLOBAL(trace)
389 1.45 chs clrl %sp@- | stack adjust count
390 1.45 chs moveml #0xFFFF,%sp@-
391 1.45 chs moveq #T_TRACE,%d0
392 1.37 itohy
393 1.37 itohy | Check PSW and see what happened.
394 1.37 itohy | T=0 S=0 (should not happen)
395 1.37 itohy | T=1 S=0 trace trap from user mode
396 1.37 itohy | T=0 S=1 trace trap on a trap instruction
397 1.37 itohy | T=1 S=1 trace trap from system mode (kernel breakpoint)
398 1.37 itohy
399 1.45 chs movw %sp@(FR_HW),%d1 | get PSW
400 1.45 chs notw %d1 | XXX no support for T0 on 680[234]0
401 1.45 chs andw #PSL_TS,%d1 | from system mode (T=1, S=1)?
402 1.37 itohy jeq _ASM_LABEL(kbrkpt) | yes, kernel brkpt
403 1.19 jeremy jra _ASM_LABEL(fault) | no, user-mode fault
404 1.1 gwr
405 1.1 gwr /*
406 1.1 gwr * Trap 15 is used for:
407 1.1 gwr * - GDB breakpoints (in user programs)
408 1.1 gwr * - KGDB breakpoints (in the kernel)
409 1.1 gwr * - trace traps for SUN binaries (not fully supported yet)
410 1.11 gwr * User mode traps are simply passed to trap().
411 1.1 gwr */
412 1.19 jeremy GLOBAL(trap15)
413 1.45 chs clrl %sp@- | stack adjust count
414 1.45 chs moveml #0xFFFF,%sp@-
415 1.45 chs moveq #T_TRAP15,%d0
416 1.45 chs btst #5,%sp@(FR_HW) | was supervisor mode?
417 1.19 jeremy jne _ASM_LABEL(kbrkpt) | yes, kernel brkpt
418 1.19 jeremy jra _ASM_LABEL(fault) | no, user-mode fault
419 1.1 gwr
420 1.19 jeremy ASLOCAL(kbrkpt)
421 1.45 chs | Kernel-mode breakpoint or trace trap. (%d0=trap_type)
422 1.1 gwr | Save the system sp rather than the user sp.
423 1.45 chs movw #PSL_HIGHIPL,%sr | lock out interrupts
424 1.45 chs lea %sp@(FR_SIZE),%a6 | Save stack pointer
425 1.45 chs movl %a6,%sp@(FR_SP) | from before trap
426 1.1 gwr
427 1.1 gwr | If we are not on tmpstk switch to it.
428 1.1 gwr | (so debugger can change the stack pointer)
429 1.45 chs movl %a6,%d1
430 1.45 chs cmpl #_ASM_LABEL(tmpstk),%d1
431 1.1 gwr jls Lbrkpt2 | already on tmpstk
432 1.1 gwr | Copy frame to the temporary stack
433 1.45 chs movl %sp,%a0 | %a0=src
434 1.45 chs lea _ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
435 1.45 chs movl %a1,%sp | sp=new frame
436 1.45 chs moveq #FR_SIZE,%d1
437 1.1 gwr Lbrkpt1:
438 1.45 chs movl %a0@+,%a1@+
439 1.45 chs subql #4,%d1
440 1.1 gwr bgt Lbrkpt1
441 1.1 gwr
442 1.1 gwr Lbrkpt2:
443 1.11 gwr | Call the trap handler for the kernel debugger.
444 1.6 gwr | Do not call trap() to handle it, so that we can
445 1.1 gwr | set breakpoints in trap() if we want. We know
446 1.1 gwr | the trap type is either T_TRACE or T_BREAKPOINT.
447 1.45 chs movl %d0,%sp@- | push trap type
448 1.19 jeremy jbsr _C_LABEL(trap_kdebug)
449 1.45 chs addql #4,%sp | pop args
450 1.6 gwr
451 1.1 gwr | The stack pointer may have been modified, or
452 1.1 gwr | data below it modified (by kgdb push call),
453 1.1 gwr | so push the hardware frame at the current sp
454 1.1 gwr | before restoring registers and returning.
455 1.45 chs movl %sp@(FR_SP),%a0 | modified sp
456 1.45 chs lea %sp@(FR_SIZE),%a1 | end of our frame
457 1.45 chs movl %a1@-,%a0@- | copy 2 longs with
458 1.45 chs movl %a1@-,%a0@- | ... predecrement
459 1.45 chs movl %a0,%sp@(FR_SP) | sp = h/w frame
460 1.45 chs moveml %sp@+,#0x7FFF | restore all but sp
461 1.45 chs movl %sp@,%sp | ... and sp
462 1.1 gwr rte | all done
463 1.1 gwr
464 1.11 gwr /* Use common m68k sigreturn */
465 1.11 gwr #include <m68k/m68k/sigreturn.s>
466 1.1 gwr
467 1.1 gwr /*
468 1.1 gwr * Interrupt handlers. Most are auto-vectored,
469 1.1 gwr * and hard-wired the same way on all sun3 models.
470 1.1 gwr * Format in the stack is:
471 1.45 chs * %d0,%d1,%a0,%a1, sr, pc, vo
472 1.1 gwr */
473 1.1 gwr
474 1.1 gwr #define INTERRUPT_SAVEREG \
475 1.45 chs moveml #0xC0C0,%sp@-
476 1.1 gwr
477 1.1 gwr #define INTERRUPT_RESTORE \
478 1.45 chs moveml %sp@+,#0x0303
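| A note on the moveml masks used here and elsewhere in this file:
| with a predecrement destination (%sp@-) the mask is bit-reversed
| (bit 15 = %d0 ... bit 0 = %a7); with any other addressing mode,
| bit 0 = %d0 ... bit 15 = %a7.  So 0xC0C0/0x0303 save and restore
| only %d0-%d1/%a0-%a1, 0xFFFF is all sixteen registers, 0x7FFF is
| everything but %sp, and 0xFCFC (in cpu_switch/savectx) is the
| callee-saved set %d2-%d7/%a2-%a7.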
479 1.1 gwr
480 1.1 gwr /*
481 1.1 gwr * This is the common auto-vector interrupt handler,
482 1.1 gwr * for which the CPU provides the vector=0x18+level.
483 1.1 gwr * These are installed in the interrupt vector table.
484 1.1 gwr */
485 1.1 gwr .align 2
486 1.19 jeremy GLOBAL(_isr_autovec)
487 1.1 gwr INTERRUPT_SAVEREG
488 1.19 jeremy jbsr _C_LABEL(isr_autovec)
489 1.1 gwr INTERRUPT_RESTORE
490 1.19 jeremy jra _ASM_LABEL(rei)
491 1.1 gwr
492 1.1 gwr /* clock: see clock.c */
493 1.1 gwr .align 2
494 1.19 jeremy GLOBAL(_isr_clock)
495 1.1 gwr INTERRUPT_SAVEREG
496 1.19 jeremy jbsr _C_LABEL(clock_intr)
497 1.1 gwr INTERRUPT_RESTORE
498 1.19 jeremy jra _ASM_LABEL(rei)
499 1.1 gwr
500 1.1 gwr | Handler for all vectored interrupts (i.e. VME interrupts)
501 1.1 gwr .align 2
502 1.19 jeremy GLOBAL(_isr_vectored)
503 1.1 gwr INTERRUPT_SAVEREG
504 1.19 jeremy jbsr _C_LABEL(isr_vectored)
505 1.1 gwr INTERRUPT_RESTORE
506 1.19 jeremy jra _ASM_LABEL(rei)
507 1.1 gwr
508 1.1 gwr #undef INTERRUPT_SAVEREG
509 1.1 gwr #undef INTERRUPT_RESTORE
510 1.1 gwr
511 1.1 gwr /* interrupt counters (needed by vmstat) */
512 1.19 jeremy GLOBAL(intrnames)
513 1.1 gwr .asciz "spur" | 0
514 1.1 gwr .asciz "lev1" | 1
515 1.1 gwr .asciz "lev2" | 2
516 1.1 gwr .asciz "lev3" | 3
517 1.1 gwr .asciz "lev4" | 4
518 1.1 gwr .asciz "clock" | 5
519 1.1 gwr .asciz "lev6" | 6
520 1.1 gwr .asciz "nmi" | 7
521 1.19 jeremy GLOBAL(eintrnames)
522 1.1 gwr
523 1.1 gwr .data
524 1.1 gwr .even
525 1.19 jeremy GLOBAL(intrcnt)
526 1.1 gwr .long 0,0,0,0,0,0,0,0,0,0
527 1.19 jeremy GLOBAL(eintrcnt)
528 1.1 gwr .text
529 1.1 gwr
530 1.1 gwr /*
531 1.1 gwr * Emulation of VAX REI instruction.
532 1.1 gwr *
533 1.1 gwr * This code is (mostly) un-altered from the hp300 code,
534 1.1 gwr * except that sun machines do not need a simulated SIR
535 1.1 gwr * because they have a real software interrupt register.
536 1.1 gwr *
537 1.1 gwr * This code deals with checking for and servicing ASTs
538 1.1 gwr * (profiling, scheduling) and software interrupts (network, softclock).
539 1.1 gwr * We check for ASTs first, just like the VAX. To avoid excess overhead
540 1.1 gwr * the T_ASTFLT handling code will also check for software interrupts so we
541 1.1 gwr * do not have to do it here. After identifying that we need an AST we
542 1.1 gwr * drop the IPL to allow device interrupts.
543 1.1 gwr *
544 1.1 gwr * This code is complicated by the fact that sendsig may have been called
545 1.1 gwr * necessitating a stack cleanup.
546 1.1 gwr */
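/*
 * Roughly, in C-like terms (an illustrative sketch of the code below):
 *
 *	if (astpending && returning to user mode) {
 *		drop IPL to PSL_LOWIPL;
 *		build a T_ASTFLT frame and call trap();
 *		if (trap() adjusted the stack (sendsig case))
 *			compact the hardware frame before returning;
 *	}
 *	rte;
 */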
547 1.1 gwr
548 1.19 jeremy ASGLOBAL(rei)
549 1.1 gwr #ifdef DIAGNOSTIC
550 1.19 jeremy tstl _C_LABEL(panicstr) | have we paniced?
551 1.1 gwr jne Ldorte | yes, do not make matters worse
552 1.1 gwr #endif
553 1.19 jeremy tstl _C_LABEL(astpending) | AST pending?
554 1.1 gwr jeq Ldorte | no, done
555 1.1 gwr Lrei1:
556 1.45 chs btst #5,%sp@ | yes, are we returning to user mode?
557 1.1 gwr jne Ldorte | no, done
558 1.45 chs movw #PSL_LOWIPL,%sr | lower SPL
559 1.45 chs clrl %sp@- | stack adjust
560 1.45 chs moveml #0xFFFF,%sp@- | save all registers
561 1.45 chs movl %usp,%a1 | including
562 1.45 chs movl %a1,%sp@(FR_SP) | the user's SP
563 1.45 chs clrl %sp@- | VA == none
564 1.45 chs clrl %sp@- | code == none
565 1.45 chs movl #T_ASTFLT,%sp@- | type == async system trap
566 1.19 jeremy jbsr _C_LABEL(trap) | go handle it
567 1.45 chs lea %sp@(12),%sp | pop value args
568 1.45 chs movl %sp@(FR_SP),%a0 | restore user SP
569 1.45 chs movl %a0,%usp | from save area
570 1.45 chs movw %sp@(FR_ADJ),%d0 | need to adjust stack?
571 1.1 gwr jne Laststkadj | yes, go to it
572 1.45 chs moveml %sp@+,#0x7FFF | no, restore most user regs
573 1.45 chs addql #8,%sp | toss SP and stack adjust
574 1.1 gwr rte | and do real RTE
575 1.1 gwr Laststkadj:
576 1.45 chs lea %sp@(FR_HW),%a1 | pointer to HW frame
577 1.45 chs addql #8,%a1 | source pointer
578 1.45 chs movl %a1,%a0 | source
579 1.45 chs addw %d0,%a0 | + hole size = dest pointer
580 1.45 chs movl %a1@-,%a0@- | copy
581 1.45 chs movl %a1@-,%a0@- | 8 bytes
582 1.45 chs movl %a0,%sp@(FR_SP) | new SSP
583 1.45 chs moveml %sp@+,#0x7FFF | restore user registers
584 1.45 chs movl %sp@,%sp | and our SP
585 1.1 gwr Ldorte:
586 1.1 gwr rte | real return
587 1.1 gwr
588 1.1 gwr /*
589 1.1 gwr * Initialization is at the beginning of this file, because the
590 1.1 gwr * kernel entry point needs to be at zero for compatibility with
591 1.1 gwr * the Sun boot loader. This works on Sun machines because the
592 1.1 gwr * interrupt vector table for reset is NOT at address zero.
593 1.1 gwr * (The MMU has a "boot" bit that forces access to the PROM)
594 1.1 gwr */
595 1.1 gwr
596 1.1 gwr /*
597 1.16 thorpej * Use common m68k sigcode.
598 1.1 gwr */
599 1.16 thorpej #include <m68k/m68k/sigcode.s>
600 1.44 jdolecek #ifdef COMPAT_SUNOS
601 1.44 jdolecek #include <m68k/m68k/sunos_sigcode.s>
602 1.44 jdolecek #endif
603 1.44 jdolecek #ifdef COMPAT_SVR4
604 1.44 jdolecek #include <m68k/m68k/svr4_sigcode.s>
605 1.44 jdolecek #endif
606 1.16 thorpej
607 1.1 gwr .text
608 1.1 gwr
609 1.1 gwr /*
610 1.1 gwr * Primitives
611 1.1 gwr */
612 1.1 gwr
613 1.1 gwr /*
614 1.12 thorpej * Use common m68k support routines.
615 1.1 gwr */
616 1.12 thorpej #include <m68k/m68k/support.s>
617 1.1 gwr
618 1.19 jeremy BSS(want_resched,4)
619 1.1 gwr
620 1.1 gwr /*
621 1.15 thorpej * Use common m68k process manipulation routines.
622 1.1 gwr */
623 1.15 thorpej #include <m68k/m68k/proc_subr.s>
624 1.1 gwr
625 1.1 gwr | Message for Lbadsw panic
626 1.1 gwr Lsw0:
627 1.1 gwr .asciz "cpu_switch"
628 1.1 gwr .even
629 1.1 gwr
630 1.1 gwr .data
631 1.19 jeremy GLOBAL(masterpaddr) | XXX compatibility (debuggers)
632 1.19 jeremy GLOBAL(curpcb)
633 1.1 gwr .long 0
634 1.19 jeremy ASBSS(nullpcb,SIZEOF_PCB)
635 1.1 gwr .text
636 1.1 gwr
637 1.1 gwr /*
638 1.1 gwr * At exit of a process, do a cpu_switch for the last time.
639 1.28 thorpej * Switch to a safe stack and PCB, and select a new process to run. The
640 1.28 thorpej * old stack and u-area will be freed by the reaper.
641 1.42 thorpej *
642 1.42 thorpej * MUST BE CALLED AT SPLHIGH!
643 1.1 gwr */
644 1.1 gwr ENTRY(switch_exit)
645 1.45 chs movl %sp@(4),%a0 | struct proc *p
646 1.19 jeremy | save state into garbage pcb
647 1.19 jeremy movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
648 1.45 chs lea _ASM_LABEL(tmpstk),%sp | goto a tmp stack
649 1.1 gwr
650 1.28 thorpej /* Schedule the vmspace and stack to be freed. */
651 1.45 chs movl %a0,%sp@- | exit2(p)
652 1.28 thorpej jbsr _C_LABEL(exit2)
653 1.45 chs lea %sp@(4),%sp
654 1.28 thorpej
655 1.42 thorpej #if defined(LOCKDEBUG)
656 1.42 thorpej /* Acquire sched_lock */
657 1.42 thorpej jbsr _C_LABEL(sched_lock_idle)
658 1.42 thorpej #endif
659 1.1 gwr
660 1.19 jeremy jra _C_LABEL(cpu_switch)
661 1.1 gwr
662 1.1 gwr /*
663 1.1 gwr * When no processes are on the runq, cpu_switch() branches to idle
664 1.1 gwr * to wait for something to come ready.
665 1.1 gwr */
666 1.1 gwr Lidle:
667 1.42 thorpej #if defined(LOCKDEBUG)
668 1.42 thorpej /* Release sched_lock */
669 1.42 thorpej jbsr _C_LABEL(sched_unlock_idle)
670 1.42 thorpej #endif
671 1.1 gwr stop #PSL_LOWIPL
672 1.19 jeremy GLOBAL(_Idle) | See clock.c
673 1.45 chs movw #PSL_HIGHIPL,%sr
674 1.42 thorpej #if defined(LOCKDEBUG)
675 1.42 thorpej /* Acquire sched_lock */
676 1.42 thorpej jbsr _C_LABEL(sched_lock_idle)
677 1.42 thorpej #endif
678 1.42 thorpej movl _C_LABEL(sched_whichqs),%d0
679 1.1 gwr jeq Lidle
680 1.1 gwr jra Lsw1
681 1.1 gwr
682 1.1 gwr Lbadsw:
683 1.45 chs movl #Lsw0,%sp@-
684 1.19 jeremy jbsr _C_LABEL(panic)
685 1.1 gwr /*NOTREACHED*/
686 1.1 gwr
687 1.1 gwr /*
688 1.1 gwr * cpu_switch()
689 1.1 gwr * Hacked for sun3
690 1.1 gwr */
691 1.1 gwr ENTRY(cpu_switch)
692 1.45 chs movl _C_LABEL(curpcb),%a1 | current pcb
693 1.45 chs movw %sr,%a1@(PCB_PS) | save sr before changing ipl
694 1.1 gwr #ifdef notyet
695 1.45 chs movl _C_LABEL(curproc),%sp@- | remember last proc running
696 1.1 gwr #endif
697 1.19 jeremy clrl _C_LABEL(curproc)
698 1.1 gwr
699 1.1 gwr /*
700 1.1 gwr * Find the highest-priority queue that isn't empty,
701 1.1 gwr * then take the first proc from that queue.
702 1.1 gwr */
703 1.42 thorpej movl _C_LABEL(sched_whichqs),%d0
704 1.43 tsutsui jeq Lidle
705 1.42 thorpej Lsw1:
706 1.42 thorpej /*
707 1.42 thorpej * Interrupts are blocked, sched_lock is held. If
708 1.42 thorpej * we come here via Idle, %d0 contains the contents
709 1.42 thorpej * of a non-zero sched_whichqs.
710 1.42 thorpej */
711 1.42 thorpej movl %d0,%d1
712 1.42 thorpej negl %d0
713 1.42 thorpej andl %d1,%d0
714 1.42 thorpej bfffo %d0{#0:#32},%d1
715 1.42 thorpej eorib #31,%d1
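	/*
	 * The movl/negl/andl sequence isolates the lowest set bit
	 * (%d0 & -%d0); bfffo then locates it, counting offsets from
	 * the MSB (bit 31 = offset 0), and the eori #31 converts that
	 * to the LSB-relative bit number.  For example, with
	 * sched_whichqs = 0x28 the lowest set bit is 0x08, bfffo
	 * returns 28, and 28 ^ 31 = 3, i.e. run queue 3.
	 */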
716 1.42 thorpej
717 1.42 thorpej movl %d1,%d0
718 1.42 thorpej lslb #3,%d1 | convert queue number to index
719 1.42 thorpej addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
720 1.42 thorpej movl %d1,%a1
721 1.42 thorpej movl %a1@(P_FORW),%a0 | p = q->p_forw
722 1.42 thorpej cmpal %d1,%a0 | anyone on queue?
723 1.1 gwr jeq Lbadsw | no, panic
724 1.38 thorpej #ifdef DIAGNOSTIC
725 1.45 chs tstl %a0@(P_WCHAN)
726 1.38 thorpej jne Lbadsw
727 1.45 chs cmpb #SRUN,%a0@(P_STAT)
728 1.38 thorpej jne Lbadsw
729 1.38 thorpej #endif
730 1.42 thorpej movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
731 1.42 thorpej movl %a0@(P_FORW),%a1 | n = p->p_forw
732 1.42 thorpej movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
733 1.42 thorpej cmpal %d1,%a1 | anyone left on queue?
734 1.42 thorpej jne Lsw2 | yes, skip
735 1.42 thorpej movl _C_LABEL(sched_whichqs),%d1
736 1.42 thorpej bclr %d0,%d1 | no, clear bit
737 1.42 thorpej movl %d1,_C_LABEL(sched_whichqs)
738 1.1 gwr Lsw2:
739 1.41 thorpej /* p->p_cpu initialized in fork1() for single-processor */
740 1.45 chs movb #SONPROC,%a0@(P_STAT) | p->p_stat = SONPROC
741 1.45 chs movl %a0,_C_LABEL(curproc)
742 1.19 jeremy clrl _C_LABEL(want_resched)
743 1.1 gwr #ifdef notyet
744 1.45 chs movl %sp@+,%a1 | XXX - Make this work!
745 1.45 chs cmpl %a0,%a1 | switching to same proc?
746 1.1 gwr jeq Lswdone | yes, skip save and restore
747 1.1 gwr #endif
748 1.1 gwr /*
749 1.1 gwr * Save state of previous process in its pcb.
750 1.1 gwr */
751 1.45 chs movl _C_LABEL(curpcb),%a1
752 1.45 chs moveml #0xFCFC,%a1@(PCB_REGS) | save non-scratch registers
753 1.45 chs movl %usp,%a2 | grab USP (a2 has been saved)
754 1.45 chs movl %a2,%a1@(PCB_USP) | and save it
755 1.1 gwr
756 1.19 jeremy tstl _C_LABEL(fputype) | Do we have an fpu?
757 1.1 gwr jeq Lswnofpsave | No? Then don't try save.
758 1.45 chs lea %a1@(PCB_FPCTX),%a2 | pointer to FP save area
759 1.45 chs fsave %a2@ | save FP state
760 1.45 chs tstb %a2@ | null state frame?
761 1.1 gwr jeq Lswnofpsave | yes, all done
762 1.45 chs fmovem %fp0-%fp7,%a2@(FPF_REGS) | save FP general regs
763 1.45 chs fmovem %fpcr/%fpsr/%fpi,%a2@(FPF_FPCR) | save FP control regs
764 1.1 gwr Lswnofpsave:
765 1.1 gwr
766 1.6 gwr /*
767 1.6 gwr * Now that we have saved all the registers that must be
768 1.6 gwr * preserved, we are free to use those registers until
769 1.6 gwr * we load the registers for the switched-to process.
770 1.45 chs * In this section, keep: %a0=curproc, %a1=curpcb
771 1.6 gwr */
772 1.6 gwr
773 1.45 chs clrl %a0@(P_BACK) | clear back link
774 1.45 chs movl %a0@(P_ADDR),%a1 | get p_addr
775 1.45 chs movl %a1,_C_LABEL(curpcb)
776 1.42 thorpej
777 1.42 thorpej #if defined(LOCKDEBUG)
778 1.42 thorpej /*
779 1.42 thorpej * Done mucking with the run queues, release the
780 1.42 thorpej * scheduler lock, but keep interrupts out.
781 1.42 thorpej */
782 1.45 chs movl %a0,%sp@- | not args...
783 1.45 chs movl %a1,%sp@- | ...just saving
784 1.42 thorpej jbsr _C_LABEL(sched_unlock_idle)
785 1.45 chs movl %sp@+,%a1
786 1.45 chs movl %sp@+,%a0
787 1.42 thorpej #endif
788 1.1 gwr
789 1.8 gwr /*
790 1.8 gwr * Load the new VM context (new MMU root pointer)
791 1.8 gwr */
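/*
 * A rough C equivalent of the inline (non-PMAP_DEBUG) path below:
 *
 *	pmap = p->p_vmspace->vm_map.pmap;
 *	if (pmap->pm_a_phys != kernel_crp.rp_addr) {
 *		kernel_crp.rp_addr = pmap->pm_a_phys;
 *		invalidate the caches, pflusha, and reload %crp;
 *	}
 */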
792 1.45 chs movl %a0@(P_VMSPACE),%a2 | vm = p->p_vmspace
793 1.8 gwr #ifdef DIAGNOSTIC
794 1.45 chs tstl %a2 | vm == VM_MAP_NULL?
795 1.8 gwr jeq Lbadsw | panic
796 1.8 gwr #endif
797 1.8 gwr #ifdef PMAP_DEBUG
798 1.25 gwr /* When debugging just call _pmap_switch(). */
799 1.45 chs movl %a2@(VM_PMAP),%a2 | pmap = vm->vm_map.pmap
800 1.45 chs pea %a2@ | push pmap
801 1.25 gwr jbsr _C_LABEL(_pmap_switch) | _pmap_switch(pmap)
802 1.45 chs addql #4,%sp
803 1.45 chs movl _C_LABEL(curpcb),%a1 | restore p_addr
804 1.8 gwr #else
805 1.25 gwr /* Otherwise, use this inline version. */
806 1.45 chs lea _C_LABEL(kernel_crp),%a3 | our CPU Root Ptr. (CRP)
807 1.45 chs movl %a2@(VM_PMAP),%a2 | pmap = vm->vm_map.pmap
808 1.45 chs movl %a2@(PM_A_PHYS),%d0 | phys = pmap->pm_a_phys
809 1.45 chs cmpl %a3@(4),%d0 | == kernel_crp.rp_addr ?
810 1.8 gwr jeq Lsame_mmuctx | skip loadcrp/flush
811 1.8 gwr /* OK, it is a new MMU context. Load it up. */
812 1.45 chs movl %d0,%a3@(4)
813 1.45 chs movl #CACHE_CLR,%d0
814 1.45 chs movc %d0,%cacr | invalidate cache(s)
815 1.1 gwr pflusha | flush entire TLB
816 1.45 chs pmove %a3@,%crp | load new user root pointer
817 1.8 gwr Lsame_mmuctx:
818 1.8 gwr #endif
819 1.1 gwr
820 1.6 gwr /*
821 1.6 gwr * Reload the registers for the new process.
822 1.45 chs * After this point we can only use %d0,%d1,%a0,%a1
823 1.6 gwr */
824 1.45 chs moveml %a1@(PCB_REGS),#0xFCFC | reload registers
825 1.45 chs movl %a1@(PCB_USP),%a0
826 1.45 chs movl %a0,%usp | and USP
827 1.1 gwr
828 1.19 jeremy tstl _C_LABEL(fputype) | If we don't have an fpu,
829 1.1 gwr jeq Lres_skip | don't try to restore it.
830 1.45 chs lea %a1@(PCB_FPCTX),%a0 | pointer to FP save area
831 1.45 chs tstb %a0@ | null state frame?
832 1.1 gwr jeq Lresfprest | yes, easy
833 1.45 chs fmovem %a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs
834 1.45 chs fmovem %a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs
835 1.1 gwr Lresfprest:
836 1.45 chs frestore %a0@ | restore state
837 1.1 gwr Lres_skip:
838 1.45 chs movw %a1@(PCB_PS),%d0 | no, restore PS
839 1.1 gwr #ifdef DIAGNOSTIC
840 1.45 chs btst #13,%d0 | supervisor mode?
841 1.1 gwr jeq Lbadsw | no? panic!
842 1.1 gwr #endif
843 1.45 chs movw %d0,%sr | OK, restore PS
844 1.45 chs movl #1,%a0 | return 1 (for alternate returns)
845 1.1 gwr rts
846 1.1 gwr
847 1.1 gwr /*
848 1.1 gwr * savectx(pcb)
849 1.1 gwr * Update pcb, saving current processor state.
850 1.1 gwr */
851 1.1 gwr ENTRY(savectx)
852 1.45 chs movl %sp@(4),%a1
853 1.45 chs movw %sr,%a1@(PCB_PS)
854 1.45 chs movl %usp,%a0 | grab USP
855 1.45 chs movl %a0,%a1@(PCB_USP) | and save it
856 1.45 chs moveml #0xFCFC,%a1@(PCB_REGS) | save non-scratch registers
857 1.1 gwr
858 1.19 jeremy tstl _C_LABEL(fputype) | Do we have FPU?
859 1.1 gwr jeq Lsavedone | No? Then don't save state.
860 1.45 chs lea %a1@(PCB_FPCTX),%a0 | pointer to FP save area
861 1.45 chs fsave %a0@ | save FP state
862 1.45 chs tstb %a0@ | null state frame?
863 1.1 gwr jeq Lsavedone | yes, all done
864 1.45 chs fmovem %fp0-%fp7,%a0@(FPF_REGS) | save FP general regs
865 1.45 chs fmovem %fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs
866 1.1 gwr Lsavedone:
867 1.45 chs movl #0,%a0 | return 0
868 1.1 gwr rts
869 1.1 gwr
870 1.20 gwr /* suline() */
871 1.1 gwr
872 1.1 gwr #ifdef DEBUG
873 1.1 gwr .data
874 1.19 jeremy ASGLOBAL(fulltflush)
875 1.1 gwr .long 0
876 1.19 jeremy ASGLOBAL(fullcflush)
877 1.1 gwr .long 0
878 1.1 gwr .text
879 1.1 gwr #endif
880 1.1 gwr
881 1.1 gwr /*
882 1.1 gwr * Invalidate entire TLB.
883 1.1 gwr */
884 1.1 gwr ENTRY(TBIA)
885 1.19 jeremy _C_LABEL(_TBIA):
886 1.1 gwr pflusha
887 1.45 chs movl #DC_CLEAR,%d0
888 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
889 1.1 gwr rts
890 1.1 gwr
891 1.1 gwr /*
892 1.1 gwr * Invalidate any TLB entry for given VA (TB Invalidate Single)
893 1.1 gwr */
894 1.1 gwr ENTRY(TBIS)
895 1.1 gwr #ifdef DEBUG
896 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
897 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush entire TLB
898 1.1 gwr #endif
899 1.45 chs movl %sp@(4),%a0
900 1.45 chs pflush #0,#0,%a0@ | flush address from both sides
901 1.45 chs movl #DC_CLEAR,%d0
902 1.45 chs movc %d0,%cacr | invalidate on-chip data cache
903 1.1 gwr rts
904 1.1 gwr
905 1.1 gwr /*
906 1.1 gwr * Invalidate supervisor side of TLB
907 1.1 gwr */
908 1.1 gwr ENTRY(TBIAS)
909 1.1 gwr #ifdef DEBUG
910 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
911 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
912 1.1 gwr #endif
913 1.1 gwr pflush #4,#4 | flush supervisor TLB entries
914 1.45 chs movl #DC_CLEAR,%d0
915 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
916 1.1 gwr rts
917 1.1 gwr
918 1.1 gwr /*
919 1.1 gwr * Invalidate user side of TLB
920 1.1 gwr */
921 1.1 gwr ENTRY(TBIAU)
922 1.1 gwr #ifdef DEBUG
923 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
924 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
925 1.1 gwr #endif
926 1.1 gwr pflush #0,#4 | flush user TLB entries
927 1.45 chs movl #DC_CLEAR,%d0
928 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
929 1.1 gwr rts
930 1.1 gwr
931 1.1 gwr /*
932 1.1 gwr * Invalidate instruction cache
933 1.1 gwr */
934 1.1 gwr ENTRY(ICIA)
935 1.45 chs movl #IC_CLEAR,%d0
936 1.45 chs movc %d0,%cacr | invalidate i-cache
937 1.1 gwr rts
938 1.1 gwr
939 1.1 gwr /*
940 1.1 gwr * Invalidate data cache.
941 1.1 gwr * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
942 1.1 gwr * problems with DC_WA. The only cases we have to worry about are context
943 1.1 gwr * switch and TLB changes, both of which are handled "in-line" in resume
944 1.1 gwr * and TBI*.
945 1.1 gwr */
946 1.1 gwr ENTRY(DCIA)
947 1.1 gwr __DCIA:
948 1.1 gwr rts
949 1.1 gwr
950 1.1 gwr ENTRY(DCIS)
951 1.1 gwr __DCIS:
952 1.1 gwr rts
953 1.1 gwr
954 1.1 gwr /*
955 1.1 gwr * Invalidate data cache.
956 1.1 gwr */
957 1.1 gwr ENTRY(DCIU)
958 1.45 chs movl #DC_CLEAR,%d0
959 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
960 1.1 gwr rts
961 1.1 gwr
962 1.1 gwr /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
963 1.1 gwr
964 1.1 gwr ENTRY(PCIA)
965 1.45 chs movl #DC_CLEAR,%d0
966 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
967 1.1 gwr rts
968 1.1 gwr
969 1.1 gwr ENTRY(ecacheon)
970 1.1 gwr rts
971 1.1 gwr
972 1.1 gwr ENTRY(ecacheoff)
973 1.1 gwr rts
974 1.1 gwr
975 1.1 gwr /*
976 1.1 gwr * Get callers current SP value.
977 1.1 gwr * Note that simply taking the address of a local variable in a C function
978 1.1 gwr * doesn't work because callee saved registers may be outside the stack frame
979 1.1 gwr * defined by A6 (e.g. GCC generated code).
980 1.20 gwr *
981 1.1 gwr * [I don't think the ENTRY() macro will do the right thing with this -- glass]
982 1.1 gwr */
983 1.19 jeremy GLOBAL(getsp)
984 1.45 chs movl %sp,%d0 | get current SP
985 1.45 chs addql #4,%d0 | compensate for return address
986 1.45 chs movl %d0,%a0
987 1.1 gwr rts
988 1.1 gwr
989 1.1 gwr ENTRY(getsfc)
990 1.45 chs movc %sfc,%d0
991 1.45 chs movl %d0,%a0
992 1.1 gwr rts
993 1.1 gwr
994 1.1 gwr ENTRY(getdfc)
995 1.45 chs movc %dfc,%d0
996 1.45 chs movl %d0,%a0
997 1.1 gwr rts
998 1.1 gwr
999 1.1 gwr ENTRY(getvbr)
1000 1.45 chs movc %vbr,%d0
1001 1.45 chs movl %d0,%a0
1002 1.1 gwr rts
1003 1.1 gwr
1004 1.1 gwr ENTRY(setvbr)
1005 1.45 chs movl %sp@(4),%d0
1006 1.45 chs movc %d0,%vbr
1007 1.1 gwr rts
1008 1.1 gwr
1009 1.1 gwr /*
1010 1.1 gwr * Load a new CPU Root Pointer (CRP) into the MMU.
1011 1.2 gwr * void loadcrp(struct mmu_rootptr *);
1012 1.1 gwr */
1013 1.1 gwr ENTRY(loadcrp)
1014 1.45 chs movl %sp@(4),%a0 | arg1: &CRP
1015 1.45 chs movl #CACHE_CLR,%d0
1016 1.45 chs movc %d0,%cacr | invalidate cache(s)
1017 1.1 gwr pflusha | flush entire TLB
1018 1.45 chs pmove %a0@,%crp | load new user root pointer
1019 1.45 chs rts
1020 1.45 chs
1021 1.45 chs ENTRY(getcrp)
1022 1.45 chs movl %sp@(4),%a0 | arg1: &crp
1023 1.45 chs pmove %crp,%a0@ | *crpp = %crp
1024 1.10 gwr rts
1025 1.10 gwr
1026 1.10 gwr /*
1027 1.10 gwr * Get the physical address of the PTE for a given VA.
1028 1.10 gwr */
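| (A rough reading of the PTEST operands below: function code #5 is
| supervisor data space, level #7 searches all the way down the table
| tree, and the address of the last descriptor fetched -- the PTE --
| is returned in %a0.)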
1029 1.10 gwr ENTRY(ptest_addr)
1030 1.45 chs movl %sp@(4),%a1 | VA
1031 1.45 chs ptestr #5,%a1@,#7,%a0 | %a0 = addr of PTE
1032 1.1 gwr rts
1033 1.1 gwr
1034 1.1 gwr /*
1035 1.1 gwr * Set processor priority level calls. Most are implemented with
1036 1.1 gwr * inline asm expansions. However, we need one instantiation here
1037 1.1 gwr * in case some non-optimized code makes external references.
1038 1.21 gwr * Most places will use the inlined functions param.h supplies.
1039 1.1 gwr */
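/*
 * A rough C rendering of _splraise() below (illustrative only;
 * getsr()/setsr() stand for reading and writing %sr):
 *
 *	int _splraise(int nsr)
 *	{
 *		int osr = getsr();
 *		if ((osr & PSL_HIGHIPL) < nsr)
 *			setsr(nsr);
 *		return (osr);
 *	}
 */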
1040 1.1 gwr
1041 1.21 gwr ENTRY(_getsr)
1042 1.45 chs clrl %d0
1043 1.45 chs movw %sr,%d0
1045 1.21 gwr rts
1046 1.21 gwr
1047 1.1 gwr ENTRY(_spl)
1048 1.45 chs clrl %d0
1049 1.45 chs movw %sr,%d0
1050 1.45 chs movl %sp@(4),%d1
1051 1.45 chs movw %d1,%sr
1052 1.1 gwr rts
1053 1.1 gwr
1054 1.21 gwr ENTRY(_splraise)
1055 1.45 chs clrl %d0
1056 1.45 chs movw %sr,%d0
1057 1.45 chs movl %d0,%d1
1058 1.45 chs andl #PSL_HIGHIPL,%d1 | old &= PSL_HIGHIPL
1059 1.45 chs cmpl %sp@(4),%d1 | (old - new)
1060 1.21 gwr bge Lsplr
1061 1.45 chs movl %sp@(4),%d1
1062 1.45 chs movw %d1,%sr
1063 1.21 gwr Lsplr:
1064 1.1 gwr rts
1065 1.1 gwr
1066 1.1 gwr /*
1067 1.1 gwr * Save and restore 68881 state.
1068 1.1 gwr */
1069 1.1 gwr ENTRY(m68881_save)
1070 1.45 chs movl %sp@(4),%a0 | save area pointer
1071 1.45 chs fsave %a0@ | save state
1072 1.45 chs tstb %a0@ | null state frame?
1073 1.1 gwr jeq Lm68881sdone | yes, all done
1074 1.45 chs fmovem %fp0-%fp7,%a0@(FPF_REGS) | save FP general regs
1075 1.45 chs fmovem %fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs
1076 1.1 gwr Lm68881sdone:
1077 1.1 gwr rts
1078 1.1 gwr
1079 1.1 gwr ENTRY(m68881_restore)
1080 1.45 chs movl %sp@(4),%a0 | save area pointer
1081 1.45 chs tstb %a0@ | null state frame?
1082 1.1 gwr jeq Lm68881rdone | yes, easy
1083 1.45 chs fmovem %a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs
1084 1.45 chs fmovem %a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs
1085 1.1 gwr Lm68881rdone:
1086 1.45 chs frestore %a0@ | restore state
1087 1.1 gwr rts
1088 1.1 gwr
1089 1.1 gwr /*
1090 1.1 gwr * _delay(unsigned N)
1091 1.1 gwr * Delay for at least (N/256) microseconds.
1092 1.1 gwr * This routine depends on the variable: delay_divisor
1093 1.1 gwr * which should be set based on the CPU clock rate.
1094 1.26 gwr * XXX: Currently this is set based on the CPU model,
1095 1.26 gwr * XXX: but this should be determined at run time...
1096 1.1 gwr */
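| Rough arithmetic, for reference: the loop below runs about
| N / delay_divisor iterations, and delay_divisor is chosen (per CPU
| clock) so that one iteration costs delay_divisor/256 microseconds;
| the total is therefore roughly N/256 microseconds, as promised above.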
1097 1.19 jeremy GLOBAL(_delay)
1098 1.45 chs | %d0 = arg = (usecs << 8)
1099 1.45 chs movl %sp@(4),%d0
1100 1.45 chs | %d1 = delay_divisor;
1101 1.45 chs movl _C_LABEL(delay_divisor),%d1
1102 1.36 thorpej jra L_delay /* Jump into the loop! */
1103 1.36 thorpej
1104 1.36 thorpej /*
1105 1.36 thorpej * Align the branch target of the loop to a half-line (8-byte)
1106 1.36 thorpej * boundary to minimize cache effects. This guarantees both
1107 1.36 thorpej * that there will be no prefetch stalls due to cache line burst
1108 1.36 thorpej * operations and that the loop will run from a single cache
1109 1.36 thorpej * half-line.
1110 1.36 thorpej */
1111 1.36 thorpej .align 8
1112 1.1 gwr L_delay:
1113 1.45 chs subl %d1,%d0
1114 1.1 gwr jgt L_delay
1115 1.1 gwr rts
1116 1.1 gwr
1117 1.1 gwr | Define some addresses, mostly so DDB can print useful info.
1118 1.24 gwr | Not using _C_LABEL() here because these symbols are never
1119 1.24 gwr | referenced by any C code, and if the leading underscore
1120 1.24 gwr | ever goes away, these lines turn into syntax errors...
1121 1.24 gwr .set _KERNBASE,KERNBASE
1122 1.26 gwr .set _MONSTART,SUN3X_MONSTART
1123 1.26 gwr .set _PROM_BASE,SUN3X_PROM_BASE
1124 1.26 gwr .set _MONEND,SUN3X_MONEND
1125 1.1 gwr
1126 1.1 gwr |The end!