/*	$NetBSD: locore.s,v 1.48 2001/07/22 14:11:05 scw Exp $	*/
2 1.1 gwr
3 1.1 gwr /*
4 1.1 gwr * Copyright (c) 1988 University of Utah.
5 1.1 gwr * Copyright (c) 1980, 1990, 1993
6 1.1 gwr * The Regents of the University of California. All rights reserved.
7 1.1 gwr *
8 1.1 gwr * This code is derived from software contributed to Berkeley by
9 1.1 gwr * the Systems Programming Group of the University of Utah Computer
10 1.1 gwr * Science Department.
11 1.1 gwr *
12 1.1 gwr * Redistribution and use in source and binary forms, with or without
13 1.1 gwr * modification, are permitted provided that the following conditions
14 1.1 gwr * are met:
15 1.1 gwr * 1. Redistributions of source code must retain the above copyright
16 1.1 gwr * notice, this list of conditions and the following disclaimer.
17 1.1 gwr * 2. Redistributions in binary form must reproduce the above copyright
18 1.1 gwr * notice, this list of conditions and the following disclaimer in the
19 1.1 gwr * documentation and/or other materials provided with the distribution.
20 1.1 gwr * 3. All advertising materials mentioning features or use of this software
21 1.1 gwr * must display the following acknowledgement:
22 1.1 gwr * This product includes software developed by the University of
23 1.1 gwr * California, Berkeley and its contributors.
24 1.1 gwr * 4. Neither the name of the University nor the names of its contributors
25 1.1 gwr * may be used to endorse or promote products derived from this software
26 1.1 gwr * without specific prior written permission.
27 1.1 gwr *
28 1.1 gwr * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 1.1 gwr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 1.1 gwr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 1.1 gwr * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 1.1 gwr * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 1.1 gwr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 1.1 gwr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 1.1 gwr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 1.1 gwr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 1.1 gwr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 1.1 gwr * SUCH DAMAGE.
39 1.1 gwr *
40 1.1 gwr * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 1.1 gwr * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 1.1 gwr */
43 1.1 gwr
44 1.29 thorpej #include "opt_compat_netbsd.h"
45 1.34 kleink #include "opt_compat_svr4.h"
46 1.35 christos #include "opt_compat_sunos.h"
47 1.47 lukem #include "opt_kgdb.h"
48 1.42 thorpej #include "opt_lockdebug.h"
49 1.27 gwr
50 1.1 gwr #include "assym.h"
51 1.17 thorpej #include <machine/asm.h>
52 1.1 gwr #include <machine/trap.h>
53 1.1 gwr
| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0			| PROM monitor's CPU root pointer (2 longs)

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0			| convert linked VA to physical
| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		|  transparent translation reg0
					|  [ pmove a0@, tt0 ]
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		|  for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled while
| for the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6			| zero frame pointer ends backtraces
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		|  space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurrious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in p->p_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%sp,%a1			| %a1=trapframe
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|  trapframe
	movl	%a2,%a1@(FR_SP)		| %a2 == usp (from above)
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,%sp			| help DDB backtrace
	trap	#15			| should not get here
169 1.1 gwr
| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
GLOBAL(proc_do_uret)
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|  user SP
	moveml	%sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,%sp			| pop SSP and stack adjust count
	rte				| return to user mode
178 1.1 gwr
/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8:	switchframe from before cpu_set_kpc
 * SP+4:	void *arg;
 * SP:		u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	%sp@+,%a0		| function pointer
	jbsr	%a0@			| (*func)(arg)
	addql	#4,%sp			| toss the arg
	rts				| as cpu_switch would do
199 1.1 gwr
200 1.1 gwr | That is all the assembly startup code we need on the sun3x!
201 1.1 gwr | The rest of this is like the hp300/locore.s where possible.
202 1.1 gwr
203 1.1 gwr /*
204 1.1 gwr * Trap/interrupt vector routines
205 1.1 gwr */
206 1.17 thorpej #include <m68k/m68k/trap_subr.s>
207 1.1 gwr
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|  in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		|  for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		|  for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we dont separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
300 1.1 gwr
/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
315 1.1 gwr
/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		|  the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
341 1.1 gwr
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
353 1.1 gwr
/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|  in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|  user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
370 1.11 gwr
/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
384 1.1 gwr
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRACE,%d0		| trap type for trap()/kbrkpt

	| Check PSW and see what happen.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
405 1.1 gwr
/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRAP15,%d0		| trap type for trap()/kbrkpt
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
420 1.1 gwr
ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1		| %d1 = bytes left to copy
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		|  ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		|  ... and sp
	rte				| all done
464 1.1 gwr
465 1.11 gwr /* Use common m68k sigreturn */
466 1.11 gwr #include <m68k/m68k/sigreturn.s>
467 1.1 gwr
/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *	%d0,%d1,%a0,%a1, sr, pc, vo
 */

| Save/restore the C-scratch registers %d0,%d1,%a0,%a1 around an ISR call.
#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
480 1.1 gwr
/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE
523 1.1 gwr
/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"			| 0
	.asciz	"lev1"			| 1
	.asciz	"lev2"			| 2
	.asciz	"lev3"			| 3
	.asciz	"lev4"			| 4
	.asciz	"clock"			| 5
	.asciz	"lev6"			| 6
	.asciz	"nmi"			| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
542 1.1 gwr
/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|  the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|  from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return
600 1.1 gwr
601 1.1 gwr /*
602 1.1 gwr * Initialization is at the beginning of this file, because the
603 1.1 gwr * kernel entry point needs to be at zero for compatibility with
604 1.1 gwr * the Sun boot loader. This works on Sun machines because the
605 1.1 gwr * interrupt vector table for reset is NOT at address zero.
606 1.1 gwr * (The MMU has a "boot" bit that forces access to the PROM)
607 1.1 gwr */
608 1.1 gwr
609 1.1 gwr /*
610 1.16 thorpej * Use common m68k sigcode.
611 1.1 gwr */
612 1.16 thorpej #include <m68k/m68k/sigcode.s>
613 1.44 jdolecek #ifdef COMPAT_SUNOS
614 1.44 jdolecek #include <m68k/m68k/sunos_sigcode.s>
615 1.44 jdolecek #endif
616 1.44 jdolecek #ifdef COMPAT_SVR4
617 1.44 jdolecek #include <m68k/m68k/svr4_sigcode.s>
618 1.44 jdolecek #endif
619 1.16 thorpej
620 1.1 gwr .text
621 1.1 gwr
622 1.1 gwr /*
623 1.1 gwr * Primitives
624 1.1 gwr */
625 1.1 gwr
626 1.1 gwr /*
627 1.12 thorpej * Use common m68k support routines.
628 1.1 gwr */
629 1.12 thorpej #include <m68k/m68k/support.s>
630 1.1 gwr
BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)	| throwaway pcb used by switch_exit
	.text
649 1.1 gwr
/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 *
 * MUST BE CALLED AT SPLHIGH!
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp		| pop arg

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)
674 1.1 gwr
/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL		| wait at low IPL for an interrupt
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr	| interrupts back off
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0 | anything runnable?
	jeq	Lidle			| no, idle some more
	jra	Lsw1			| yes, rejoin cpu_switch

Lbadsw:
	movl	#Lsw0,%sp@-		| panic("cpu_switch")
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/
699 1.1 gwr
700 1.1 gwr /*
701 1.1 gwr * cpu_switch()
702 1.1 gwr * Hacked for sun3
703 1.1 gwr */
704 1.1 gwr ENTRY(cpu_switch)
705 1.45 chs movl _C_LABEL(curpcb),%a1 | current pcb
706 1.45 chs movw %sr,%a1@(PCB_PS) | save sr before changing ipl
707 1.1 gwr #ifdef notyet
708 1.45 chs movl _C_LABEL(curproc),%sp@- | remember last proc running
709 1.1 gwr #endif
710 1.19 jeremy clrl _C_LABEL(curproc)
711 1.1 gwr
712 1.1 gwr /*
713 1.1 gwr * Find the highest-priority queue that isn't empty,
714 1.1 gwr * then take the first proc from that queue.
715 1.1 gwr */
716 1.42 thorpej movl _C_LABEL(sched_whichqs),%d0
717 1.43 tsutsui jeq Lidle
718 1.42 thorpej Lsw1:
719 1.42 thorpej /*
720 1.42 thorpej * Interrupts are blocked, sched_lock is held. If
721 1.42 thorpej * we come here via Idle, %d0 contains the contents
722 1.42 thorpej * of a non-zero sched_whichqs.
723 1.42 thorpej */
724 1.42 thorpej movl %d0,%d1
725 1.42 thorpej negl %d0
726 1.42 thorpej andl %d1,%d0
727 1.42 thorpej bfffo %d0{#0:#32},%d1
728 1.42 thorpej eorib #31,%d1
729 1.42 thorpej
730 1.42 thorpej movl %d1,%d0
731 1.42 thorpej lslb #3,%d1 | convert queue number to index
732 1.42 thorpej addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
733 1.42 thorpej movl %d1,%a1
734 1.42 thorpej movl %a1@(P_FORW),%a0 | p = q->p_forw
735 1.42 thorpej cmpal %d1,%a0 | anyone on queue?
736 1.1 gwr jeq Lbadsw | no, panic
737 1.38 thorpej #ifdef DIAGNOSTIC
738 1.45 chs tstl %a0@(P_WCHAN)
739 1.38 thorpej jne Lbadsw
740 1.45 chs cmpb #SRUN,%a0@(P_STAT)
741 1.38 thorpej jne Lbadsw
742 1.38 thorpej #endif
743 1.42 thorpej movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
744 1.42 thorpej movl %a0@(P_FORW),%a1 | n = p->p_forw
745 1.42 thorpej movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
746 1.42 thorpej cmpal %d1,%a1 | anyone left on queue?
747 1.42 thorpej jne Lsw2 | yes, skip
748 1.42 thorpej movl _C_LABEL(sched_whichqs),%d1
749 1.42 thorpej bclr %d0,%d1 | no, clear bit
750 1.42 thorpej movl %d1,_C_LABEL(sched_whichqs)
751 1.1 gwr Lsw2:
752 1.41 thorpej /* p->p_cpu initialized in fork1() for single-processor */
753 1.45 chs movb #SONPROC,%a0@(P_STAT) | p->p_stat = SONPROC
754 1.45 chs movl %a0,_C_LABEL(curproc)
755 1.19 jeremy clrl _C_LABEL(want_resched)
756 1.1 gwr #ifdef notyet
757 1.45 chs movl %sp@+,%a1 | XXX - Make this work!
758 1.45 chs cmpl %a0,%a1 | switching to same proc?
759 1.1 gwr jeq Lswdone | yes, skip save and restore
760 1.1 gwr #endif
761 1.1 gwr /*
762 1.1 gwr * Save state of previous process in its pcb.
763 1.1 gwr */
764 1.45 chs movl _C_LABEL(curpcb),%a1
765 1.45 chs moveml #0xFCFC,%a1@(PCB_REGS) | save non-scratch registers (%d2-%d7/%a2-%a7)
766 1.45 chs movl %usp,%a2 | grab USP (a2 has been saved)
767 1.45 chs movl %a2,%a1@(PCB_USP) | and save it
768 1.1 gwr
769 1.19 jeremy tstl _C_LABEL(fputype) | Do we have an fpu?
770 1.1 gwr jeq Lswnofpsave | No? Then don't try save.
771 1.45 chs lea %a1@(PCB_FPCTX),%a2 | pointer to FP save area
772 1.45 chs fsave %a2@ | save FP state
773 1.45 chs tstb %a2@ | null state frame? (first byte 0 => FPU idle)
774 1.1 gwr jeq Lswnofpsave | yes, all done
775 1.45 chs fmovem %fp0-%fp7,%a2@(FPF_REGS) | save FP general regs
776 1.45 chs fmovem %fpcr/%fpsr/%fpi,%a2@(FPF_FPCR) | save FP control regs
777 1.1 gwr Lswnofpsave:
778 1.1 gwr
779 1.6 gwr /*
780 1.6 gwr * Now that we have saved all the registers that must be
781 1.6 gwr * preserved, we are free to use those registers until
782 1.6 gwr * we load the registers for the switched-to process.
783 1.45 chs * In this section, keep: %a0=curproc, %a1=curpcb
784 1.6 gwr */
785 1.6 gwr
786 1.45 chs clrl %a0@(P_BACK) | clear back link
787 1.45 chs movl %a0@(P_ADDR),%a1 | get p_addr
788 1.45 chs movl %a1,_C_LABEL(curpcb)
789 1.42 thorpej
790 1.42 thorpej #if defined(LOCKDEBUG)
791 1.42 thorpej /*
792 1.42 thorpej * Done mucking with the run queues, release the
793 1.42 thorpej * scheduler lock, but keep interrupts out.
794 1.42 thorpej */
795 1.45 chs movl %a0,%sp@- | not args...
796 1.45 chs movl %a1,%sp@- | ...just saving across the call
797 1.42 thorpej jbsr _C_LABEL(sched_unlock_idle)
798 1.45 chs movl %sp@+,%a1
799 1.45 chs movl %sp@+,%a0
800 1.42 thorpej #endif
801 1.1 gwr
802 1.8 gwr /*
803 1.8 gwr * Load the new VM context (new MMU root pointer)
804 1.8 gwr */
805 1.45 chs movl %a0@(P_VMSPACE),%a2 | vm = p->p_vmspace
806 1.8 gwr #ifdef DIAGNOSTIC
807 1.45 chs tstl %a2 | vm == VM_MAP_NULL?
808 1.8 gwr jeq Lbadsw | panic
809 1.8 gwr #endif
810 1.8 gwr #ifdef PMAP_DEBUG
811 1.25 gwr /* When debugging just call _pmap_switch(). */
812 1.45 chs movl %a2@(VM_PMAP),%a2 | pmap = vm->vm_map.pmap
813 1.45 chs pea %a2@ | push pmap
814 1.25 gwr jbsr _C_LABEL(_pmap_switch) | _pmap_switch(pmap)
815 1.45 chs addql #4,%sp
816 1.45 chs movl _C_LABEL(curpcb),%a1 | restore p_addr
817 1.8 gwr #else
818 1.25 gwr /* Otherwise, use this inline version. */
819 1.45 chs lea _C_LABEL(kernel_crp),%a3 | our CPU Root Ptr. (CRP)
820 1.45 chs movl %a2@(VM_PMAP),%a2 | pmap = vm->vm_map.pmap
821 1.45 chs movl %a2@(PM_A_PHYS),%d0 | phys = pmap->pm_a_phys
822 1.45 chs cmpl %a3@(4),%d0 | == kernel_crp.rp_addr ?
823 1.8 gwr jeq Lsame_mmuctx | skip loadcrp/flush
824 1.8 gwr /* OK, it is a new MMU context. Load it up. */
825 1.45 chs movl %d0,%a3@(4) | kernel_crp.rp_addr = phys
826 1.45 chs movl #CACHE_CLR,%d0
827 1.45 chs movc %d0,%cacr | invalidate cache(s)
828 1.1 gwr pflusha | flush entire TLB
829 1.45 chs pmove %a3@,%crp | load new user root pointer
830 1.8 gwr Lsame_mmuctx:
831 1.8 gwr #endif
832 1.1 gwr
833 1.6 gwr /*
834 1.6 gwr * Reload the registers for the new process.
835 1.45 chs * After this point we can only use %d0,%d1,%a0,%a1
836 1.6 gwr */
837 1.45 chs moveml %a1@(PCB_REGS),#0xFCFC | reload registers (%d2-%d7/%a2-%a7)
838 1.45 chs movl %a1@(PCB_USP),%a0
839 1.45 chs movl %a0,%usp | and USP
840 1.1 gwr
841 1.19 jeremy tstl _C_LABEL(fputype) | If we don't have an fpu,
842 1.1 gwr jeq Lres_skip | don't try to restore it.
843 1.45 chs lea %a1@(PCB_FPCTX),%a0 | pointer to FP save area
844 1.45 chs tstb %a0@ | null state frame?
845 1.1 gwr jeq Lresfprest | yes, easy
846 1.45 chs fmovem %a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs
847 1.45 chs fmovem %a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs
848 1.1 gwr Lresfprest:
849 1.45 chs frestore %a0@ | restore state
850 1.1 gwr Lres_skip:
851 1.45 chs movw %a1@(PCB_PS),%d0 | no, restore PS
852 1.1 gwr #ifdef DIAGNOSTIC
853 1.45 chs btst #13,%d0 | supervisor mode?
854 1.1 gwr jeq Lbadsw | no? panic!
855 1.1 gwr #endif
856 1.45 chs movw %d0,%sr | OK, restore PS
857 1.45 chs moveq #1,%d0 | return 1 (for alternate returns)
858 1.1 gwr rts
859 1.1 gwr
860 1.1 gwr /*
861 1.1 gwr * savectx(pcb)
862 1.1 gwr * Update pcb, saving current processor state.
863 1.1 gwr * Saves SR, USP, the non-scratch registers, and (if present) FPU state.
864 1.1 gwr */
865 1.45 chs ENTRY(savectx)
866 1.45 chs movl %sp@(4),%a1 | %a1 = pcb argument
867 1.45 chs movw %sr,%a1@(PCB_PS)
868 1.45 chs movl %usp,%a0 | grab USP
869 1.45 chs movl %a0,%a1@(PCB_USP) | and save it
870 1.45 chs moveml #0xFCFC,%a1@(PCB_REGS) | save non-scratch registers (%d2-%d7/%a2-%a7)
871 1.1 gwr
872 1.19 jeremy tstl _C_LABEL(fputype) | Do we have FPU?
873 1.1 gwr jeq Lsavedone | No? Then don't save state.
874 1.45 chs lea %a1@(PCB_FPCTX),%a0 | pointer to FP save area
875 1.45 chs fsave %a0@ | save FP state
876 1.45 chs tstb %a0@ | null state frame?
877 1.1 gwr jeq Lsavedone | yes, all done
878 1.45 chs fmovem %fp0-%fp7,%a0@(FPF_REGS) | save FP general regs
879 1.45 chs fmovem %fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs
880 1.1 gwr Lsavedone:
881 1.45 chs moveq #0,%d0 | return 0
882 1.1 gwr rts
882 1.1 gwr
883 1.20 gwr /* suline() */
884 1.1 gwr
885 1.1 gwr #ifdef DEBUG
886 1.1 gwr .data
887 1.19 jeremy ASGLOBAL(fulltflush)
888 1.1 gwr .long 0 | debug: non-zero => TBI* routines flush the whole TLB
889 1.19 jeremy ASGLOBAL(fullcflush)
890 1.1 gwr .long 0 | debug: companion full-cache-flush flag (not read in this file)
891 1.1 gwr .text
892 1.1 gwr #endif
893 1.1 gwr
894 1.1 gwr /*
895 1.1 gwr * Invalidate entire TLB.
896 1.1 gwr */
897 1.1 gwr ENTRY(TBIA)
898 1.19 jeremy _C_LABEL(_TBIA):
899 1.1 gwr pflusha | flush all ATC (TLB) entries
900 1.45 chs movl #DC_CLEAR,%d0
901 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
902 1.1 gwr rts
903 1.1 gwr
904 1.1 gwr /*
905 1.1 gwr * Invalidate any TLB entry for given VA (TB Invalidate Single)
906 1.1 gwr */
907 1.1 gwr ENTRY(TBIS)
908 1.1 gwr #ifdef DEBUG
909 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
910 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush entire TLB
911 1.1 gwr #endif
912 1.45 chs movl %sp@(4),%a0 | %a0 = VA argument
913 1.45 chs pflush #0,#0,%a0@ | flush address from both sides
914 1.45 chs movl #DC_CLEAR,%d0
915 1.45 chs movc %d0,%cacr | invalidate on-chip data cache
916 1.1 gwr rts
917 1.1 gwr
918 1.1 gwr /*
919 1.1 gwr * Invalidate supervisor side of TLB
920 1.1 gwr */
921 1.1 gwr ENTRY(TBIAS)
922 1.1 gwr #ifdef DEBUG
923 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
924 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
925 1.1 gwr #endif
926 1.1 gwr pflush #4,#4 | flush supervisor TLB entries
927 1.45 chs movl #DC_CLEAR,%d0
928 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
929 1.1 gwr rts
930 1.1 gwr
931 1.1 gwr /*
932 1.1 gwr * Invalidate user side of TLB
933 1.1 gwr */
934 1.1 gwr ENTRY(TBIAU)
935 1.1 gwr #ifdef DEBUG
936 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
937 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
938 1.1 gwr #endif
939 1.1 gwr pflush #0,#4 | flush user TLB entries
940 1.45 chs movl #DC_CLEAR,%d0
941 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
942 1.1 gwr rts
943 1.1 gwr
944 1.1 gwr /*
945 1.1 gwr * Invalidate instruction cache
946 1.1 gwr */
947 1.1 gwr ENTRY(ICIA)
948 1.45 chs movl #IC_CLEAR,%d0
949 1.45 chs movc %d0,%cacr | invalidate i-cache
950 1.1 gwr rts
951 1.1 gwr
952 1.1 gwr /*
953 1.1 gwr * Invalidate data cache.
954 1.1 gwr * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
955 1.1 gwr * problems with DC_WA. The only cases we have to worry about are context
956 1.1 gwr * switch and TLB changes, both of which are handled "in-line" in resume
957 1.1 gwr * and TBI*.
958 1.1 gwr */
959 1.1 gwr ENTRY(DCIA)
960 1.1 gwr __DCIA:
961 1.1 gwr rts | deliberate no-op; see NOTE above
962 1.1 gwr
963 1.1 gwr ENTRY(DCIS)
964 1.1 gwr __DCIS:
965 1.1 gwr rts | deliberate no-op; see NOTE above
966 1.1 gwr
967 1.1 gwr /*
968 1.1 gwr * Invalidate data cache.
969 1.1 gwr */
970 1.1 gwr ENTRY(DCIU)
971 1.45 chs movl #DC_CLEAR,%d0
972 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
973 1.1 gwr rts
974 1.1 gwr
975 1.1 gwr /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
976 1.1 gwr
977 1.1 gwr ENTRY(PCIA)
978 1.45 chs movl #DC_CLEAR,%d0
979 1.45 chs movc %d0,%cacr | invalidate on-chip d-cache
980 1.1 gwr rts
981 1.1 gwr
982 1.1 gwr ENTRY(ecacheon)
983 1.1 gwr rts | no external cache on this machine; no-op
984 1.1 gwr
985 1.1 gwr ENTRY(ecacheoff)
986 1.1 gwr rts | no external cache on this machine; no-op
987 1.1 gwr
988 1.1 gwr /*
989 1.1 gwr * Get callers current SP value.
990 1.1 gwr * Note that simply taking the address of a local variable in a C function
991 1.1 gwr * doesn't work because callee saved registers may be outside the stack frame
992 1.1 gwr * defined by A6 (e.g. GCC generated code).
993 1.20 gwr *
994 1.1 gwr * [I don't think the ENTRY() macro will do the right thing with this -- glass]
995 1.1 gwr */
996 1.19 jeremy GLOBAL(getsp)
997 1.45 chs movl %sp,%d0 | get current SP
998 1.45 chs addql #4,%d0 | compensate for return address
999 1.45 chs movl %d0,%a0 | value left in both %d0 and %a0
1000 1.1 gwr rts
1001 1.1 gwr
1002 1.1 gwr ENTRY(getsfc)
1003 1.45 chs movc %sfc,%d0 | read source function code register
1004 1.45 chs movl %d0,%a0 | value left in both %d0 and %a0
1005 1.1 gwr rts
1006 1.1 gwr
1007 1.1 gwr ENTRY(getdfc)
1008 1.45 chs movc %dfc,%d0 | read destination function code register
1009 1.45 chs movl %d0,%a0 | value left in both %d0 and %a0
1010 1.1 gwr rts
1011 1.1 gwr
1012 1.1 gwr ENTRY(getvbr)
1013 1.45 chs movc %vbr,%d0 | read vector base register
1014 1.45 chs movl %d0,%a0 | value left in both %d0 and %a0
1015 1.1 gwr rts
1016 1.1 gwr
1017 1.1 gwr ENTRY(setvbr)
1018 1.45 chs movl %sp@(4),%d0 | new vector base address
1019 1.45 chs movc %d0,%vbr
1020 1.1 gwr rts
1021 1.1 gwr
1022 1.1 gwr /*
1023 1.1 gwr * Load a new CPU Root Pointer (CRP) into the MMU.
1024 1.2 gwr * void loadcrp(struct mmu_rootptr *);
1025 1.1 gwr * Caches and TLB are invalidated first, since the old
1026 1.1 gwr * translations become stale with the new root pointer.
1027 1.1 gwr */
1028 1.1 gwr ENTRY(loadcrp)
1029 1.45 chs movl %sp@(4),%a0 | arg1: &CRP
1030 1.45 chs movl #CACHE_CLR,%d0
1031 1.45 chs movc %d0,%cacr | invalidate cache(s)
1032 1.1 gwr pflusha | flush entire TLB
1033 1.45 chs pmove %a0@,%crp | load new user root pointer
1034 1.45 chs rts
1035 1.45 chs
1036 1.45 chs ENTRY(getcrp)
1037 1.45 chs movl %sp@(4),%a0 | arg1: &crp
1038 1.45 chs pmove %crp,%a0@ | *crpp = %crp
1039 1.10 gwr rts
1038 1.10 gwr
1039 1.10 gwr /*
1040 1.10 gwr * Get the physical address of the PTE for a given VA.
1041 1.10 gwr */
1042 1.10 gwr ENTRY(ptest_addr)
1043 1.45 chs movl %sp@(4),%a1 | VA
1044 1.45 chs ptestr #5,%a1@,#7,%a0 | %a0 = addr of PTE (fc 5, search all 7 levels)
1045 1.48 scw movl %a0,%d0 | Result in %d0 (not a pointer return)
1046 1.1 gwr rts
1047 1.1 gwr
1048 1.1 gwr /*
1049 1.1 gwr * Set processor priority level calls. Most are implemented with
1050 1.1 gwr * inline asm expansions. However, we need one instantiation here
1051 1.1 gwr * in case some non-optimized code makes external references.
1052 1.21 gwr * Most places will use the inlined functions param.h supplies.
1053 1.1 gwr */
1054 1.1 gwr
1055 1.21 gwr ENTRY(_getsr)
1056 1.45 chs clrl %d0 | zero upper word; movw below loads only low 16 bits
1057 1.45 chs movw %sr,%d0 | return current SR in %d0
1058 1.21 gwr rts
1060 1.21 gwr
1061 1.1 gwr ENTRY(_spl)
1062 1.45 chs clrl %d0 | zero upper word; movw below loads only low 16 bits
1063 1.45 chs movw %sr,%d0 | return previous SR in %d0
1064 1.45 chs movl %sp@(4),%d1 | new SR value (arg1)
1065 1.45 chs movw %d1,%sr | set it unconditionally
1066 1.1 gwr rts
1067 1.1 gwr
1068 1.21 gwr ENTRY(_splraise)
1069 1.45 chs clrl %d0 | zero upper word; movw below loads only low 16 bits
1070 1.45 chs movw %sr,%d0 | return previous SR in %d0
1071 1.45 chs movl %d0,%d1
1072 1.45 chs andl #PSL_HIGHIPL,%d1 | old &= PSL_HIGHIPL
1073 1.45 chs cmpl %sp@(4),%d1 | (old - new)
1074 1.21 gwr bge Lsplr | already at or above requested IPL: leave SR alone
1075 1.45 chs movl %sp@(4),%d1
1076 1.45 chs movw %d1,%sr | raise to the requested level
1077 1.21 gwr Lsplr:
1078 1.1 gwr rts
1079 1.1 gwr
1080 1.1 gwr /*
1081 1.1 gwr * Save and restore 68881 state.
1082 1.1 gwr */
1083 1.1 gwr ENTRY(m68881_save)
1084 1.45 chs movl %sp@(4),%a0 | save area pointer
1085 1.45 chs fsave %a0@ | save state
1086 1.45 chs tstb %a0@ | null state frame? (first byte 0 => FPU idle)
1087 1.1 gwr jeq Lm68881sdone | yes, all done
1088 1.45 chs fmovem %fp0-%fp7,%a0@(FPF_REGS) | save FP general regs
1089 1.45 chs fmovem %fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs
1090 1.1 gwr Lm68881sdone:
1091 1.1 gwr rts
1092 1.1 gwr
1093 1.1 gwr ENTRY(m68881_restore)
1094 1.45 chs movl %sp@(4),%a0 | save area pointer
1095 1.45 chs tstb %a0@ | null state frame? (first byte 0 => nothing to load)
1096 1.1 gwr jeq Lm68881rdone | yes, easy
1097 1.45 chs fmovem %a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs
1098 1.45 chs fmovem %a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs
1099 1.1 gwr Lm68881rdone:
1100 1.45 chs frestore %a0@ | restore state
1101 1.1 gwr rts
1102 1.1 gwr
1103 1.1 gwr /*
1104 1.1 gwr * _delay(unsigned N)
1105 1.1 gwr * Delay for at least (N/256) microseconds.
1106 1.1 gwr * This routine depends on the variable: delay_divisor
1107 1.1 gwr * which should be set based on the CPU clock rate.
1108 1.26 gwr * XXX: Currently this is set based on the CPU model,
1109 1.26 gwr * XXX: but this should be determined at run time...
1110 1.1 gwr */
1111 1.19 jeremy GLOBAL(_delay)
1112 1.45 chs | %d0 = arg = (usecs << 8)
1113 1.45 chs movl %sp@(4),%d0
1114 1.45 chs | %d1 = delay_divisor;
1115 1.45 chs movl _C_LABEL(delay_divisor),%d1
1116 1.36 thorpej jra L_delay /* Jump into the loop! */
1117 1.36 thorpej
1118 1.36 thorpej /*
1119 1.36 thorpej * Align the branch target of the loop to a half-line (8-byte)
1120 1.36 thorpej * boundary to minimize cache effects. This guarantees both
1121 1.36 thorpej * that there will be no prefetch stalls due to cache line burst
1122 1.36 thorpej * operations and that the loop will run from a single cache
1123 1.36 thorpej * half-line.
1124 1.36 thorpej */
1125 1.46 kleink #ifdef __ELF__
1126 1.36 thorpej .align 8 | ELF as: argument is a byte count
1127 1.46 kleink #else
1128 1.46 kleink .align 3 | a.out as: argument is a power of two (2^3 = 8)
1129 1.46 kleink #endif
1130 1.1 gwr L_delay:
1131 1.45 chs subl %d1,%d0 | spin: count down by delay_divisor until <= 0
1132 1.1 gwr jgt L_delay
1133 1.1 gwr rts
1134 1.1 gwr
1135 1.1 gwr | Define some addresses, mostly so DDB can print useful info.
1136 1.24 gwr | Not using _C_LABEL() here because these symbols are never
1137 1.24 gwr | referenced by any C code, and if the leading underscore
1138 1.24 gwr | ever goes away, these lines turn into syntax errors...
1139 1.24 gwr .set _KERNBASE,KERNBASE | kernel virtual base
1140 1.26 gwr .set _MONSTART,SUN3X_MONSTART | PROM monitor region start
1141 1.26 gwr .set _PROM_BASE,SUN3X_PROM_BASE | PROM image base
1142 1.26 gwr .set _MONEND,SUN3X_MONEND | PROM monitor region end
1143 1.1 gwr
1144 1.1 gwr |The end!
1145