locore.s revision 1.41 1 1.41 thorpej /* $NetBSD: locore.s,v 1.41 2000/05/31 05:06:57 thorpej Exp $ */
2 1.1 gwr
3 1.1 gwr /*
4 1.1 gwr * Copyright (c) 1988 University of Utah.
5 1.1 gwr * Copyright (c) 1980, 1990, 1993
6 1.1 gwr * The Regents of the University of California. All rights reserved.
7 1.1 gwr *
8 1.1 gwr * This code is derived from software contributed to Berkeley by
9 1.1 gwr * the Systems Programming Group of the University of Utah Computer
10 1.1 gwr * Science Department.
11 1.1 gwr *
12 1.1 gwr * Redistribution and use in source and binary forms, with or without
13 1.1 gwr * modification, are permitted provided that the following conditions
14 1.1 gwr * are met:
15 1.1 gwr * 1. Redistributions of source code must retain the above copyright
16 1.1 gwr * notice, this list of conditions and the following disclaimer.
17 1.1 gwr * 2. Redistributions in binary form must reproduce the above copyright
18 1.1 gwr * notice, this list of conditions and the following disclaimer in the
19 1.1 gwr * documentation and/or other materials provided with the distribution.
20 1.1 gwr * 3. All advertising materials mentioning features or use of this software
21 1.1 gwr * must display the following acknowledgement:
22 1.1 gwr * This product includes software developed by the University of
23 1.1 gwr * California, Berkeley and its contributors.
24 1.1 gwr * 4. Neither the name of the University nor the names of its contributors
25 1.1 gwr * may be used to endorse or promote products derived from this software
26 1.1 gwr * without specific prior written permission.
27 1.1 gwr *
28 1.1 gwr * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 1.1 gwr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 1.1 gwr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 1.1 gwr * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 1.1 gwr * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 1.1 gwr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 1.1 gwr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 1.1 gwr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 1.1 gwr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 1.1 gwr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 1.1 gwr * SUCH DAMAGE.
39 1.1 gwr *
40 1.1 gwr * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 1.1 gwr * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 1.1 gwr */
43 1.1 gwr
44 1.29 thorpej #include "opt_compat_netbsd.h"
45 1.34 kleink #include "opt_compat_svr4.h"
46 1.35 christos #include "opt_compat_sunos.h"
47 1.27 gwr
48 1.1 gwr #include "assym.h"
49 1.17 thorpej #include <machine/asm.h>
50 1.1 gwr #include <machine/trap.h>
51 1.1 gwr
52 1.1 gwr | Remember this is a fun project!
53 1.1 gwr
54 1.1 gwr .data
| mon_crp: two-longword save area for the PROM monitor's MMU CPU Root
| Pointer (CRP); also briefly reused as scratch for the tt0 setup below.
55 1.19 jeremy GLOBAL(mon_crp)
56 1.1 gwr .long 0,0
57 1.1 gwr
58 1.1 gwr | This is for kvm_mkdb, and should be the address of the beginning
59 1.1 gwr | of the kernel text segment (not necessarily the same as kernbase).
60 1.1 gwr .text
61 1.19 jeremy GLOBAL(kernel_text)
62 1.1 gwr
63 1.1 gwr | This is the entry point, as well as the end of the temporary stack
64 1.1 gwr | used during process switch (one 8K page ending at start)
| Note: tmpstk and start share the same address; the temporary stack
| grows downward from here while the kernel's first instruction is here.
65 1.19 jeremy ASGLOBAL(tmpstk)
66 1.20 gwr ASGLOBAL(start)
67 1.19 jeremy
68 1.1 gwr | The first step, after disabling interrupts, is to map enough of the kernel
69 1.1 gwr | into high virtual address space so that we can use position dependent code.
70 1.1 gwr | This is a tricky task on the sun3x because the MMU is already enabled and
71 1.1 gwr | the ROM monitor provides no indication of where the root MMU table is mapped.
72 1.1 gwr | Therefore we must use one of the 68030's 'transparent translation' registers
73 1.1 gwr | to define a range in the address space where the MMU translation is
74 1.1 gwr | turned off. Once this is complete we can modify the MMU table directly
75 1.1 gwr | without the need for it to be mapped into virtual memory.
76 1.1 gwr | All code must be position independent until otherwise noted, as the
77 1.1 gwr | boot loader has loaded us into low memory but all the symbols in this
78 1.1 gwr | code have been linked high.
79 1.1 gwr movw #PSL_HIGHIPL, sr | no interrupts
80 1.1 gwr movl #KERNBASE, a5 | for vtop conversion
81 1.19 jeremy lea _C_LABEL(mon_crp), a0 | where to store the CRP
| a0 now holds the physical (load-time) address of mon_crp:
82 1.1 gwr subl a5, a0
83 1.1 gwr | Note: borrowing mon_crp for tt0 setup...
84 1.1 gwr movl #0x3F8107, a0@ | map the low 1GB v=p with the
85 1.14 jeremy .long 0xf0100800 | transparent translation reg0
86 1.14 jeremy | [ pmove a0@, tt0 ]
| NOTE(review): raw opcode emitted because the assembler presumably lacks
| support for the tt0 operand of pmove -- confirm against build toolchain.
87 1.1 gwr | In order to map the kernel into high memory we will copy the root table
88 1.1 gwr | entry which maps the 16 megabytes of memory starting at 0x0 into the
89 1.1 gwr | entry which maps the 16 megabytes starting at KERNBASE.
90 1.1 gwr pmove crp, a0@ | Get monitor CPU root pointer
91 1.1 gwr movl a0@(4), a1 | 2nd word is PA of level A table
92 1.1 gwr
93 1.1 gwr movl a1, a0 | compute the descriptor address
94 1.1 gwr addl #0x3e0, a1 | for VA starting at KERNBASE
95 1.1 gwr movl a0@, a1@ | copy descriptor type
96 1.1 gwr movl a0@(4), a1@(4) | copy physical address
97 1.1 gwr
98 1.1 gwr | Kernel is now double mapped at zero and KERNBASE.
99 1.1 gwr | Force a long jump to the relocated code (high VA).
100 1.1 gwr movl #IC_CLEAR, d0 | Flush the I-cache
101 1.1 gwr movc d0, cacr
102 1.1 gwr jmp L_high_code:l | long jump
103 1.1 gwr
104 1.1 gwr L_high_code:
105 1.1 gwr | We are now running in the correctly relocated kernel, so
106 1.1 gwr | we are no longer restricted to position-independent code.
107 1.1 gwr | It is handy to leave transparent translation enabled
108 1.20 gwr | for the low 1GB while _bootstrap() is doing its thing.
109 1.1 gwr
110 1.1 gwr | Do bootstrap stuff needed before main() gets called.
111 1.1 gwr | Our boot loader leaves a copy of the kernel's exec header
112 1.1 gwr | just before the start of the kernel text segment, so the
113 1.1 gwr | kernel can sanity-check the DDB symbols at [end...esym].
114 1.20 gwr | Pass the struct exec at tmpstk-32 to _bootstrap().
115 1.7 gwr | Also, make sure the initial frame pointer is zero so that
116 1.7 gwr | the backtrace algorithm used by KGDB terminates nicely.
117 1.19 jeremy lea _ASM_LABEL(tmpstk)-32, sp
118 1.6 gwr movl #0,a6
119 1.26 gwr jsr _C_LABEL(_bootstrap) | See locore2.c
120 1.1 gwr
121 1.1 gwr | Now turn off the transparent translation of the low 1GB.
122 1.1 gwr | (this also flushes the ATC)
123 1.1 gwr clrl sp@-
124 1.14 jeremy .long 0xf0170800 | pmove sp@,tt0
125 1.1 gwr addql #4,sp
126 1.1 gwr
127 1.20 gwr | Now that _bootstrap() is done using the PROM functions,
128 1.1 gwr | we can safely set the sfc/dfc to something != FC_CONTROL
129 1.1 gwr moveq #FC_USERD, d0 | make movs access "user data"
130 1.1 gwr movc d0, sfc | space for copyin/copyout
131 1.1 gwr movc d0, dfc
132 1.1 gwr
133 1.1 gwr | Setup process zero user/kernel stacks.
134 1.19 jeremy movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
135 1.1 gwr lea a1@(USPACE-4),sp | set SSP to last word
136 1.1 gwr movl #USRSTACK-4,a2
137 1.1 gwr movl a2,usp | init user SP
138 1.1 gwr
139 1.20 gwr | Note curpcb was already set in _bootstrap().
140 1.1 gwr | Will do fpu initialization during autoconfig (see fpu.c)
141 1.1 gwr | The interrupt vector table and stack are now ready.
142 1.1 gwr | Interrupts will be enabled later, AFTER autoconfiguration
143 1.1 gwr | is finished, to avoid spurious interrupts.
144 1.1 gwr
145 1.1 gwr /*
146 1.1 gwr * Final preparation for calling main.
147 1.1 gwr *
148 1.1 gwr * Create a fake exception frame that returns to user mode,
149 1.1 gwr * and save its address in p->p_md.md_regs for cpu_fork().
150 1.1 gwr * The new frames for process 1 and 2 will be adjusted by
151 1.1 gwr * cpu_set_kpc() to arrange for a call to a kernel function
152 1.1 gwr * before the new process does its rte out to user mode.
153 1.1 gwr */
154 1.6 gwr clrw sp@- | tf_format,tf_vector
155 1.6 gwr clrl sp@- | tf_pc (filled in later)
156 1.6 gwr movw #PSL_USER,sp@- | tf_sr for user mode
157 1.6 gwr clrl sp@- | tf_stackadj
158 1.6 gwr lea sp@(-64),sp | tf_regs[16]
159 1.6 gwr movl sp,a1 | a1=trapframe
160 1.19 jeremy lea _C_LABEL(proc0),a0 | proc0.p_md.md_regs =
161 1.6 gwr movl a1,a0@(P_MDREGS) | trapframe
162 1.6 gwr movl a2,a1@(FR_SP) | a2 == usp (from above)
163 1.7 gwr pea a1@ | push &trapframe
164 1.19 jeremy jbsr _C_LABEL(main) | main(&trapframe)
165 1.7 gwr addql #4,sp | help DDB backtrace
166 1.1 gwr trap #15 | should not get here
167 1.1 gwr
168 1.1 gwr | This is used by cpu_fork() to return to user mode.
169 1.1 gwr | It is called with SP pointing to a struct trapframe.
170 1.19 jeremy GLOBAL(proc_do_uret)
171 1.1 gwr movl sp@(FR_SP),a0 | grab and load
172 1.1 gwr movl a0,usp | user SP
173 1.1 gwr moveml sp@+,#0x7FFF | load most registers (all but SSP)
174 1.1 gwr addql #8,sp | pop SSP and stack adjust count
175 1.1 gwr rte | return to user mode via trapframe's hw frame
176 1.1 gwr
177 1.1 gwr /*
178 1.1 gwr * proc_trampoline:
179 1.1 gwr * This is used by cpu_set_kpc() to "push" a function call onto the
180 1.1 gwr * kernel stack of some process, very much like a signal delivery.
181 1.1 gwr * When we get here, the stack has:
182 1.1 gwr *
183 1.1 gwr * SP+8: switchframe from before cpu_set_kpc
184 1.31 thorpej * SP+4: void *arg;
185 1.1 gwr * SP: u_long func;
186 1.1 gwr *
187 1.1 gwr * On entry, the switchframe pushed by cpu_set_kpc has already been
188 1.1 gwr * popped off the stack, so all this needs to do is pop the function
189 1.1 gwr * pointer into a register, call it, then pop the arg, and finally
190 1.1 gwr * return using the switchframe that remains on the stack.
191 1.1 gwr */
192 1.19 jeremy GLOBAL(proc_trampoline)
193 1.1 gwr movl sp@+,a0 | function pointer
194 1.31 thorpej jbsr a0@ | (*func)(arg)
| arg is still at sp@ during the call, serving as the callee's first argument
195 1.1 gwr addql #4,sp | toss the arg
196 1.1 gwr rts | as cpu_switch would do
197 1.1 gwr
198 1.1 gwr | That is all the assembly startup code we need on the sun3x!
199 1.1 gwr | The rest of this is like the hp300/locore.s where possible.
200 1.1 gwr
201 1.1 gwr /*
202 1.1 gwr * Trap/interrupt vector routines
203 1.1 gwr */
204 1.17 thorpej #include <m68k/m68k/trap_subr.s>
205 1.1 gwr
| Bus-error / address-error entry points. Decode the 68030 exception
| stack frame and special status word (SSW), classify the fault as MMU
| fault, address error, or true bus error, and hand it to faultstkadj
| with (type, code=SSW, va) pushed for trap().
206 1.19 jeremy GLOBAL(buserr)
207 1.19 jeremy tstl _C_LABEL(nofault) | device probe?
208 1.19 jeremy jeq _C_LABEL(addrerr) | no, handle as usual
209 1.19 jeremy movl _C_LABEL(nofault),sp@- | yes,
210 1.19 jeremy jbsr _C_LABEL(longjmp) | longjmp(nofault)
211 1.19 jeremy GLOBAL(addrerr)
212 1.1 gwr clrl sp@- | stack adjust count
213 1.1 gwr moveml #0xFFFF,sp@- | save user registers
214 1.1 gwr movl usp,a0 | save the user SP
215 1.1 gwr movl a0,sp@(FR_SP) | in the savearea
216 1.1 gwr lea sp@(FR_HW),a1 | grab base of HW berr frame
217 1.1 gwr moveq #0,d0
218 1.1 gwr movw a1@(10),d0 | grab SSW for fault processing
219 1.1 gwr btst #12,d0 | RB set?
220 1.1 gwr jeq LbeX0 | no, test RC
221 1.1 gwr bset #14,d0 | yes, must set FB
222 1.1 gwr movw d0,a1@(10) | for hardware too
223 1.1 gwr LbeX0:
224 1.1 gwr btst #13,d0 | RC set?
225 1.1 gwr jeq LbeX1 | no, skip
226 1.1 gwr bset #15,d0 | yes, must set FC
227 1.1 gwr movw d0,a1@(10) | for hardware too
228 1.1 gwr LbeX1:
229 1.1 gwr btst #8,d0 | data fault?
230 1.1 gwr jeq Lbe0 | no, check for hard cases
231 1.1 gwr movl a1@(16),d1 | fault address is as given in frame
232 1.1 gwr jra Lbe10 | thats it
233 1.1 gwr Lbe0:
234 1.1 gwr btst #4,a1@(6) | long (type B) stack frame?
235 1.1 gwr jne Lbe4 | yes, go handle
236 1.1 gwr movl a1@(2),d1 | no, can use save PC
237 1.1 gwr btst #14,d0 | FB set?
238 1.1 gwr jeq Lbe3 | no, try FC
239 1.1 gwr addql #4,d1 | yes, adjust address
240 1.1 gwr jra Lbe10 | done
241 1.1 gwr Lbe3:
242 1.1 gwr btst #15,d0 | FC set?
243 1.1 gwr jeq Lbe10 | no, done
244 1.1 gwr addql #2,d1 | yes, adjust address
245 1.1 gwr jra Lbe10 | done
246 1.1 gwr Lbe4:
247 1.1 gwr movl a1@(36),d1 | long format, use stage B address
248 1.1 gwr btst #15,d0 | FC set?
249 1.1 gwr jeq Lbe10 | no, all done
250 1.1 gwr subql #2,d1 | yes, adjust address
251 1.1 gwr Lbe10:
| d1 = fault virtual address, d0 = (padded) SSW from here on
252 1.1 gwr movl d1,sp@- | push fault VA
253 1.1 gwr movl d0,sp@- | and padded SSW
254 1.1 gwr movw a1@(6),d0 | get frame format/vector offset
255 1.1 gwr andw #0x0FFF,d0 | clear out frame format
256 1.1 gwr cmpw #12,d0 | address error vector?
257 1.1 gwr jeq Lisaerr | yes, go to it
258 1.1 gwr
259 1.1 gwr /* MMU-specific code to determine reason for bus error. */
260 1.1 gwr movl d1,a0 | fault address
261 1.1 gwr movl sp@,d0 | function code from ssw
262 1.1 gwr btst #8,d0 | data fault?
263 1.1 gwr jne Lbe10a
264 1.1 gwr movql #1,d0 | user program access FC
265 1.1 gwr | (we dont separate data/program)
266 1.1 gwr btst #5,a1@ | supervisor mode?
267 1.1 gwr jeq Lbe10a | if no, done
268 1.1 gwr movql #5,d0 | else supervisor program access
269 1.1 gwr Lbe10a:
270 1.1 gwr ptestr d0,a0@,#7 | do a table search
271 1.1 gwr pmove psr,sp@ | save result
272 1.1 gwr movb sp@,d1
273 1.1 gwr btst #2,d1 | invalid? (incl. limit viol and berr)
274 1.1 gwr jeq Lmightnotbemerr | no -> wp check
275 1.1 gwr btst #7,d1 | is it MMU table berr?
276 1.1 gwr jeq Lismerr | no, must be fast
277 1.1 gwr jra Lisberr1 | real bus err needs not be fast
278 1.1 gwr Lmightnotbemerr:
279 1.1 gwr btst #3,d1 | write protect bit set?
280 1.1 gwr jeq Lisberr1 | no, must be bus error
281 1.1 gwr movl sp@,d0 | ssw into low word of d0
282 1.1 gwr andw #0xc0,d0 | write protect is set on page:
283 1.1 gwr cmpw #0x40,d0 | was it read cycle?
284 1.1 gwr jeq Lisberr1 | yes, was not WPE, must be bus err
285 1.1 gwr /* End of MMU-specific bus error code. */
286 1.1 gwr
287 1.1 gwr Lismerr:
288 1.1 gwr movl #T_MMUFLT,sp@- | show that we are an MMU fault
289 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
290 1.1 gwr Lisaerr:
291 1.1 gwr movl #T_ADDRERR,sp@- | mark address error
292 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
293 1.1 gwr Lisberr1:
294 1.1 gwr clrw sp@ | re-clear pad word
295 1.1 gwr Lisberr:
296 1.1 gwr movl #T_BUSERR,sp@- | mark bus error
297 1.17 thorpej jra _ASM_LABEL(faultstkadj) | and deal with it
298 1.1 gwr
299 1.1 gwr /*
300 1.1 gwr * FP exceptions.
| fpfline: F-line (unimplemented FP instruction) trap -> T_FPEMULI.
| fpunsupp: unsupported FP data type trap -> T_FPEMULD.
| Both build a standard fault frame and jump to the common fault path.
301 1.1 gwr */
302 1.19 jeremy GLOBAL(fpfline)
303 1.1 gwr clrl sp@- | stack adjust count
304 1.1 gwr moveml #0xFFFF,sp@- | save registers
305 1.1 gwr moveq #T_FPEMULI,d0 | denote as FP emulation trap
306 1.19 jeremy jra _ASM_LABEL(fault) | do it
307 1.1 gwr
308 1.19 jeremy GLOBAL(fpunsupp)
309 1.1 gwr clrl sp@- | stack adjust count
310 1.1 gwr moveml #0xFFFF,sp@- | save registers
311 1.1 gwr moveq #T_FPEMULD,d0 | denote as FP emulation trap
312 1.19 jeremy jra _ASM_LABEL(fault) | do it
313 1.1 gwr
314 1.1 gwr /*
315 1.1 gwr * Handles all other FP coprocessor exceptions.
316 1.1 gwr * Note that since some FP exceptions generate mid-instruction frames
317 1.1 gwr * and may cause signal delivery, we need to test for stack adjustment
318 1.1 gwr * after the trap call.
319 1.1 gwr */
320 1.19 jeremy GLOBAL(fpfault)
321 1.1 gwr clrl sp@- | stack adjust count
322 1.1 gwr moveml #0xFFFF,sp@- | save user registers
323 1.1 gwr movl usp,a0 | and save
324 1.1 gwr movl a0,sp@(FR_SP) | the user stack pointer
325 1.1 gwr clrl sp@- | no VA arg
326 1.19 jeremy movl _C_LABEL(curpcb),a0 | current pcb
327 1.1 gwr lea a0@(PCB_FPCTX),a0 | address of FP savearea
328 1.1 gwr fsave a0@ | save state
329 1.1 gwr tstb a0@ | null state frame?
330 1.1 gwr jeq Lfptnull | yes, safe
331 1.1 gwr clrw d0 | no, need to tweak BIU
332 1.1 gwr movb a0@(1),d0 | get frame size
333 1.1 gwr bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
334 1.1 gwr Lfptnull:
335 1.1 gwr fmovem fpsr,sp@- | push fpsr as code argument
336 1.1 gwr frestore a0@ | restore state
337 1.1 gwr movl #T_FPERR,sp@- | push type arg
338 1.17 thorpej jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
339 1.1 gwr
340 1.1 gwr /*
341 1.1 gwr * Other exceptions only cause four and six word stack frame and require
342 1.1 gwr * no post-trap stack adjustment.
| badtrap: catch-all for unexpected vectors; reports via straytrap()
| and returns through the common rei path.
343 1.1 gwr */
344 1.19 jeremy GLOBAL(badtrap)
345 1.1 gwr clrl sp@- | stack adjust count
346 1.1 gwr moveml #0xFFFF,sp@- | save std frame regs
347 1.19 jeremy jbsr _C_LABEL(straytrap) | report
348 1.1 gwr moveml sp@+,#0xFFFF | restore regs
349 1.1 gwr addql #4, sp | stack adjust count
350 1.19 jeremy jra _ASM_LABEL(rei) | all done
351 1.1 gwr
352 1.1 gwr /*
353 1.1 gwr * Trap 0 is for system calls
| Syscall number arrives in d0; a full register frame is built so that
| syscall() can read/modify user register state, then most registers
| (all but SSP) are reloaded before the rte in rei.
354 1.1 gwr */
355 1.19 jeremy GLOBAL(trap0)
356 1.1 gwr clrl sp@- | stack adjust count
357 1.1 gwr moveml #0xFFFF,sp@- | save user registers
358 1.1 gwr movl usp,a0 | save the user SP
359 1.1 gwr movl a0,sp@(FR_SP) | in the savearea
360 1.1 gwr movl d0,sp@- | push syscall number
361 1.19 jeremy jbsr _C_LABEL(syscall) | handle it
362 1.1 gwr addql #4,sp | pop syscall arg
363 1.1 gwr movl sp@(FR_SP),a0 | grab and restore
364 1.1 gwr movl a0,usp | user SP
365 1.1 gwr moveml sp@+,#0x7FFF | restore most registers
366 1.1 gwr addql #8,sp | pop SP and stack adjust
367 1.19 jeremy jra _ASM_LABEL(rei) | all done
368 1.11 gwr
369 1.11 gwr /*
370 1.11 gwr * Trap 12 is the entry point for the cachectl "syscall"
371 1.11 gwr * cachectl(command, addr, length)
372 1.11 gwr * command in d0, addr in a1, length in d1
| Calls cachectl1(command, addr, length, curproc); args pushed in
| reverse so they appear in C argument order.
373 1.11 gwr */
374 1.19 jeremy GLOBAL(trap12)
375 1.32 is movl _C_LABEL(curproc),sp@- | push curproc pointer
376 1.11 gwr movl d1,sp@- | push length
377 1.11 gwr movl a1,sp@- | push addr
378 1.11 gwr movl d0,sp@- | push command
379 1.32 is jbsr _C_LABEL(cachectl1) | do it
380 1.32 is lea sp@(16),sp | pop args
381 1.19 jeremy jra _ASM_LABEL(rei) | all done
382 1.1 gwr
383 1.1 gwr /*
384 1.1 gwr * Trace (single-step) trap. Kernel-mode is special.
385 1.1 gwr * User mode traps are simply passed on to trap().
386 1.1 gwr */
387 1.19 jeremy GLOBAL(trace)
388 1.1 gwr clrl sp@- | stack adjust count
389 1.1 gwr moveml #0xFFFF,sp@- | save all registers for fault/kbrkpt
390 1.1 gwr moveq #T_TRACE,d0 | trap type for either path
391 1.37 itohy
392 1.37 itohy | Check PSW and see what happened.
393 1.37 itohy | T=0 S=0 (should not happen)
394 1.37 itohy | T=1 S=0 trace trap from user mode
395 1.37 itohy | T=0 S=1 trace trap on a trap instruction
396 1.37 itohy | T=1 S=1 trace trap from system mode (kernel breakpoint)
397 1.37 itohy
398 1.37 itohy movw sp@(FR_HW),d1 | get PSW
399 1.37 itohy notw d1 | XXX no support for T0 on 680[234]0
400 1.37 itohy andw #PSL_TS,d1 | from system mode (T=1, S=1)?
401 1.37 itohy jeq _ASM_LABEL(kbrkpt) | yes, kernel brkpt
402 1.19 jeremy jra _ASM_LABEL(fault) | no, user-mode fault
403 1.1 gwr
404 1.1 gwr /*
405 1.1 gwr * Trap 15 is used for:
406 1.1 gwr * - GDB breakpoints (in user programs)
407 1.1 gwr * - KGDB breakpoints (in the kernel)
408 1.1 gwr * - trace traps for SUN binaries (not fully supported yet)
409 1.11 gwr * User mode traps are simply passed to trap().
410 1.1 gwr */
411 1.19 jeremy GLOBAL(trap15)
412 1.1 gwr clrl sp@- | stack adjust count
413 1.1 gwr moveml #0xFFFF,sp@- | save all registers
414 1.1 gwr moveq #T_TRAP15,d0 | trap type for either path
415 1.11 gwr btst #5,sp@(FR_HW) | was supervisor mode?
416 1.19 jeremy jne _ASM_LABEL(kbrkpt) | yes, kernel brkpt
417 1.19 jeremy jra _ASM_LABEL(fault) | no, user-mode fault
418 1.1 gwr
419 1.19 jeremy ASLOCAL(kbrkpt)
420 1.11 gwr | Kernel-mode breakpoint or trace trap. (d0=trap_type)
421 1.1 gwr | Save the system sp rather than the user sp.
422 1.1 gwr movw #PSL_HIGHIPL,sr | lock out interrupts
423 1.1 gwr lea sp@(FR_SIZE),a6 | Save stack pointer
424 1.1 gwr movl a6,sp@(FR_SP) | from before trap
425 1.1 gwr
426 1.1 gwr | If we are not on tmpstk switch to it.
427 1.1 gwr | (so debugger can change the stack pointer)
428 1.1 gwr movl a6,d1
429 1.19 jeremy cmpl #_ASM_LABEL(tmpstk),d1
430 1.1 gwr jls Lbrkpt2 | already on tmpstk
431 1.1 gwr | Copy frame to the temporary stack
432 1.1 gwr movl sp,a0 | a0=src
433 1.19 jeremy lea _ASM_LABEL(tmpstk)-96,a1 | a1=dst
434 1.1 gwr movl a1,sp | sp=new frame
435 1.1 gwr moveq #FR_SIZE,d1
436 1.1 gwr Lbrkpt1:
| copy FR_SIZE bytes, one longword at a time
437 1.1 gwr movl a0@+,a1@+
438 1.1 gwr subql #4,d1
439 1.1 gwr bgt Lbrkpt1
440 1.1 gwr
441 1.1 gwr Lbrkpt2:
442 1.11 gwr | Call the trap handler for the kernel debugger.
443 1.6 gwr | Do not call trap() to handle it, so that we can
444 1.1 gwr | set breakpoints in trap() if we want. We know
445 1.1 gwr | the trap type is either T_TRACE or T_BREAKPOINT.
446 1.6 gwr movl d0,sp@- | push trap type
447 1.19 jeremy jbsr _C_LABEL(trap_kdebug)
448 1.6 gwr addql #4,sp | pop args
449 1.6 gwr
450 1.1 gwr | The stack pointer may have been modified, or
451 1.1 gwr | data below it modified (by kgdb push call),
452 1.1 gwr | so push the hardware frame at the current sp
453 1.1 gwr | before restoring registers and returning.
454 1.1 gwr movl sp@(FR_SP),a0 | modified sp
455 1.1 gwr lea sp@(FR_SIZE),a1 | end of our frame
456 1.1 gwr movl a1@-,a0@- | copy 2 longs with
457 1.1 gwr movl a1@-,a0@- | ... predecrement
458 1.1 gwr movl a0,sp@(FR_SP) | sp = h/w frame
459 1.1 gwr moveml sp@+,#0x7FFF | restore all but sp
460 1.1 gwr movl sp@,sp | ... and sp
461 1.1 gwr rte | all done
462 1.1 gwr
463 1.11 gwr /* Use common m68k sigreturn */
464 1.11 gwr #include <m68k/m68k/sigreturn.s>
465 1.1 gwr
466 1.1 gwr /*
467 1.1 gwr * Interrupt handlers. Most are auto-vectored,
468 1.1 gwr * and hard-wired the same way on all sun3 models.
469 1.1 gwr * Format in the stack is:
470 1.1 gwr * d0,d1,a0,a1, sr, pc, vo
471 1.1 gwr */
472 1.1 gwr
| Save/restore only the C scratch registers d0-d1/a0-a1; the masks
| differ (0xC0C0 vs 0x0303) because moveml reverses the register mask
| for predecrement addressing.
473 1.1 gwr #define INTERRUPT_SAVEREG \
474 1.1 gwr moveml #0xC0C0,sp@-
475 1.1 gwr
476 1.1 gwr #define INTERRUPT_RESTORE \
477 1.1 gwr moveml sp@+,#0x0303
478 1.1 gwr
479 1.1 gwr /*
480 1.1 gwr * This is the common auto-vector interrupt handler,
481 1.1 gwr * for which the CPU provides the vector=0x18+level.
482 1.1 gwr * These are installed in the interrupt vector table.
483 1.1 gwr */
484 1.1 gwr .align 2
485 1.19 jeremy GLOBAL(_isr_autovec)
486 1.1 gwr INTERRUPT_SAVEREG
487 1.19 jeremy jbsr _C_LABEL(isr_autovec)
488 1.1 gwr INTERRUPT_RESTORE
489 1.19 jeremy jra _ASM_LABEL(rei)
490 1.1 gwr
491 1.1 gwr /* clock: see clock.c */
492 1.1 gwr .align 2
493 1.19 jeremy GLOBAL(_isr_clock)
494 1.1 gwr INTERRUPT_SAVEREG
495 1.19 jeremy jbsr _C_LABEL(clock_intr)
496 1.1 gwr INTERRUPT_RESTORE
497 1.19 jeremy jra _ASM_LABEL(rei)
498 1.1 gwr
499 1.1 gwr | Handler for all vectored interrupts (i.e. VME interrupts)
500 1.1 gwr .align 2
501 1.19 jeremy GLOBAL(_isr_vectored)
502 1.1 gwr INTERRUPT_SAVEREG
503 1.19 jeremy jbsr _C_LABEL(isr_vectored)
504 1.1 gwr INTERRUPT_RESTORE
505 1.19 jeremy jra _ASM_LABEL(rei)
506 1.1 gwr
507 1.1 gwr #undef INTERRUPT_SAVEREG
508 1.1 gwr #undef INTERRUPT_RESTORE
509 1.1 gwr
510 1.1 gwr /* interrupt counters (needed by vmstat) */
511 1.19 jeremy GLOBAL(intrnames)
512 1.1 gwr .asciz "spur" | 0
513 1.1 gwr .asciz "lev1" | 1
514 1.1 gwr .asciz "lev2" | 2
515 1.1 gwr .asciz "lev3" | 3
516 1.1 gwr .asciz "lev4" | 4
517 1.1 gwr .asciz "clock" | 5
518 1.1 gwr .asciz "lev6" | 6
519 1.1 gwr .asciz "nmi" | 7
520 1.19 jeremy GLOBAL(eintrnames)
521 1.1 gwr
522 1.1 gwr .data
523 1.1 gwr .even
524 1.19 jeremy GLOBAL(intrcnt)
| NOTE(review): 10 counters here but only 8 names above; vmstat sizes
| these independently from the e* labels -- confirm this is intentional.
525 1.1 gwr .long 0,0,0,0,0,0,0,0,0,0
526 1.19 jeremy GLOBAL(eintrcnt)
527 1.1 gwr .text
528 1.1 gwr
529 1.1 gwr /*
530 1.1 gwr * Emulation of VAX REI instruction.
531 1.1 gwr *
532 1.1 gwr * This code is (mostly) un-altered from the hp300 code,
533 1.1 gwr * except that sun machines do not need a simulated SIR
534 1.1 gwr * because they have a real software interrupt register.
535 1.1 gwr *
536 1.1 gwr * This code deals with checking for and servicing ASTs
537 1.1 gwr * (profiling, scheduling) and software interrupts (network, softclock).
538 1.1 gwr * We check for ASTs first, just like the VAX. To avoid excess overhead
539 1.1 gwr * the T_ASTFLT handling code will also check for software interrupts so we
540 1.1 gwr * do not have to do it here. After identifying that we need an AST we
541 1.1 gwr * drop the IPL to allow device interrupts.
542 1.1 gwr *
543 1.1 gwr * This code is complicated by the fact that sendsig may have been called
544 1.1 gwr * necessitating a stack cleanup.
545 1.1 gwr */
546 1.1 gwr
547 1.19 jeremy ASGLOBAL(rei)
548 1.1 gwr #ifdef DIAGNOSTIC
549 1.19 jeremy tstl _C_LABEL(panicstr) | have we paniced?
550 1.1 gwr jne Ldorte | yes, do not make matters worse
551 1.1 gwr #endif
552 1.19 jeremy tstl _C_LABEL(astpending) | AST pending?
553 1.1 gwr jeq Ldorte | no, done
554 1.1 gwr Lrei1:
555 1.1 gwr btst #5,sp@ | yes, are we returning to user mode?
556 1.1 gwr jne Ldorte | no, done
557 1.1 gwr movw #PSL_LOWIPL,sr | lower SPL
558 1.1 gwr clrl sp@- | stack adjust
559 1.1 gwr moveml #0xFFFF,sp@- | save all registers
560 1.1 gwr movl usp,a1 | including
561 1.1 gwr movl a1,sp@(FR_SP) | the users SP
| deliver the AST as a T_ASTFLT trap: trap(T_ASTFLT, 0, 0)
562 1.1 gwr clrl sp@- | VA == none
563 1.1 gwr clrl sp@- | code == none
564 1.1 gwr movl #T_ASTFLT,sp@- | type == async system trap
565 1.19 jeremy jbsr _C_LABEL(trap) | go handle it
566 1.1 gwr lea sp@(12),sp | pop value args
567 1.1 gwr movl sp@(FR_SP),a0 | restore user SP
568 1.1 gwr movl a0,usp | from save area
569 1.1 gwr movw sp@(FR_ADJ),d0 | need to adjust stack?
570 1.1 gwr jne Laststkadj | yes, go to it
571 1.1 gwr moveml sp@+,#0x7FFF | no, restore most user regs
572 1.1 gwr addql #8,sp | toss SP and stack adjust
573 1.1 gwr rte | and do real RTE
574 1.1 gwr Laststkadj:
| sendsig moved the frame; slide the hardware frame up by the
| adjustment amount before returning
575 1.1 gwr lea sp@(FR_HW),a1 | pointer to HW frame
576 1.1 gwr addql #8,a1 | source pointer
577 1.1 gwr movl a1,a0 | source
578 1.1 gwr addw d0,a0 | + hole size = dest pointer
579 1.1 gwr movl a1@-,a0@- | copy
580 1.1 gwr movl a1@-,a0@- | 8 bytes
581 1.1 gwr movl a0,sp@(FR_SP) | new SSP
582 1.1 gwr moveml sp@+,#0x7FFF | restore user registers
583 1.1 gwr movl sp@,sp | and our SP
584 1.1 gwr Ldorte:
585 1.1 gwr rte | real return
586 1.1 gwr
587 1.1 gwr /*
588 1.1 gwr * Initialization is at the beginning of this file, because the
589 1.1 gwr * kernel entry point needs to be at zero for compatibility with
590 1.1 gwr * the Sun boot loader. This works on Sun machines because the
591 1.1 gwr * interrupt vector table for reset is NOT at address zero.
592 1.1 gwr * (The MMU has a "boot" bit that forces access to the PROM)
593 1.1 gwr */
594 1.1 gwr
595 1.1 gwr /*
596 1.16 thorpej * Use common m68k sigcode.
597 1.1 gwr */
598 1.16 thorpej #include <m68k/m68k/sigcode.s>
599 1.16 thorpej
600 1.1 gwr .text
601 1.1 gwr
602 1.1 gwr /*
603 1.1 gwr * Primitives
604 1.1 gwr */
605 1.1 gwr
606 1.1 gwr /*
607 1.12 thorpej * Use common m68k support routines.
608 1.1 gwr */
609 1.12 thorpej #include <m68k/m68k/support.s>
610 1.1 gwr
| want_resched: 4-byte flag, cleared in cpu_switch when a new process
| is selected.
611 1.19 jeremy BSS(want_resched,4)
612 1.1 gwr
613 1.1 gwr /*
614 1.15 thorpej * Use common m68k process manipulation routines.
615 1.1 gwr */
616 1.15 thorpej #include <m68k/m68k/proc_subr.s>
617 1.1 gwr
618 1.1 gwr | Message for Lbadsw panic
619 1.1 gwr Lsw0:
620 1.1 gwr .asciz "cpu_switch"
621 1.1 gwr .even
622 1.1 gwr
623 1.1 gwr .data
| masterpaddr is an alias for curpcb kept for debugger compatibility;
| both name the same longword.
624 1.19 jeremy GLOBAL(masterpaddr) | XXX compatibility (debuggers)
625 1.19 jeremy GLOBAL(curpcb)
626 1.1 gwr .long 0
| nullpcb: throwaway pcb used by switch_exit for the dying process
627 1.19 jeremy ASBSS(nullpcb,SIZEOF_PCB)
628 1.1 gwr .text
629 1.1 gwr
630 1.1 gwr /*
631 1.1 gwr * At exit of a process, do a cpu_switch for the last time.
632 1.28 thorpej * Switch to a safe stack and PCB, and select a new process to run. The
633 1.28 thorpej * old stack and u-area will be freed by the reaper.
634 1.1 gwr */
635 1.1 gwr ENTRY(switch_exit)
636 1.1 gwr movl sp@(4),a0 | struct proc *p
637 1.19 jeremy | save state into garbage pcb
638 1.19 jeremy movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
639 1.19 jeremy lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
640 1.1 gwr
641 1.28 thorpej /* Schedule the vmspace and stack to be freed. */
642 1.28 thorpej movl a0,sp@- | exit2(p)
643 1.28 thorpej jbsr _C_LABEL(exit2)
644 1.28 thorpej
645 1.28 thorpej /* Don't pop the proc; pass it to cpu_switch(). */
646 1.1 gwr
647 1.19 jeremy jra _C_LABEL(cpu_switch) | never returns here
648 1.1 gwr
649 1.1 gwr /*
650 1.1 gwr * When no processes are on the runq, cpu_switch() branches to idle
651 1.1 gwr * to wait for something to come ready.
652 1.1 gwr */
653 1.1 gwr .data
654 1.19 jeremy GLOBAL(Idle_count)
655 1.1 gwr .long 0
656 1.1 gwr .text
657 1.1 gwr
658 1.1 gwr Lidle:
| wait at low IPL for the next interrupt, then re-check the run queues
| at high IPL (the interrupt falls through the stop)
659 1.1 gwr stop #PSL_LOWIPL
660 1.19 jeremy GLOBAL(_Idle) | See clock.c
661 1.1 gwr movw #PSL_HIGHIPL,sr
662 1.19 jeremy addql #1, _C_LABEL(Idle_count)
663 1.39 thorpej tstl _C_LABEL(sched_whichqs)
664 1.1 gwr jeq Lidle
665 1.1 gwr movw #PSL_LOWIPL,sr
666 1.1 gwr jra Lsw1
667 1.1 gwr
668 1.1 gwr Lbadsw:
| panic("cpu_switch") -- reached when run-queue state is inconsistent
669 1.1 gwr movl #Lsw0,sp@-
670 1.19 jeremy jbsr _C_LABEL(panic)
671 1.1 gwr /*NOTREACHED*/
672 1.1 gwr
673 1.1 gwr /*
674 1.1 gwr * cpu_switch()
675 1.1 gwr * Hacked for sun3
676 1.1 gwr * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
677 1.1 gwr * XXX - Should we use p->p_addr instead of curpcb? -gwr
678 1.1 gwr */
679 1.1 gwr ENTRY(cpu_switch)
680 1.19 jeremy movl _C_LABEL(curpcb),a1 | current pcb
681 1.1 gwr movw sr,a1@(PCB_PS) | save sr before changing ipl
682 1.1 gwr #ifdef notyet
683 1.19 jeremy movl _C_LABEL(curproc),sp@- | remember last proc running
684 1.1 gwr #endif
685 1.19 jeremy clrl _C_LABEL(curproc)
686 1.1 gwr
687 1.1 gwr Lsw1:
688 1.1 gwr /*
689 1.1 gwr * Find the highest-priority queue that isn't empty,
690 1.1 gwr * then take the first proc from that queue.
691 1.1 gwr */
692 1.1 gwr clrl d0
693 1.39 thorpej lea _C_LABEL(sched_whichqs),a0
694 1.1 gwr movl a0@,d1
695 1.1 gwr Lswchk:
696 1.1 gwr btst d0,d1
697 1.1 gwr jne Lswfnd
698 1.1 gwr addqb #1,d0
699 1.1 gwr cmpb #32,d0
700 1.1 gwr jne Lswchk
701 1.19 jeremy jra _C_LABEL(_Idle)
702 1.1 gwr Lswfnd:
703 1.1 gwr movw #PSL_HIGHIPL,sr | lock out interrupts
704 1.1 gwr movl a0@,d1 | and check again...
705 1.1 gwr bclr d0,d1
706 1.1 gwr jeq Lsw1 | proc moved, rescan
707 1.1 gwr movl d1,a0@ | update whichqs
708 1.1 gwr moveq #1,d1 | double check for higher priority
709 1.1 gwr lsll d0,d1 | process (which may have snuck in
710 1.1 gwr subql #1,d1 | while we were finding this one)
711 1.1 gwr andl a0@,d1
712 1.1 gwr jeq Lswok | no one got in, continue
713 1.1 gwr movl a0@,d1
714 1.1 gwr bset d0,d1 | otherwise put this one back
715 1.1 gwr movl d1,a0@
716 1.1 gwr jra Lsw1 | and rescan
717 1.1 gwr Lswok:
718 1.1 gwr movl d0,d1
719 1.1 gwr lslb #3,d1 | convert queue number to index
720 1.40 nathanw addl #_C_LABEL(sched_qs),d1 | locate queue (q)
721 1.1 gwr movl d1,a1
722 1.1 gwr cmpl a1@(P_FORW),a1 | anyone on queue?
723 1.1 gwr jeq Lbadsw | no, panic
724 1.1 gwr movl a1@(P_FORW),a0 | p = q->p_forw
725 1.38 thorpej #ifdef DIAGNOSTIC
726 1.38 thorpej tstl a0@(P_WCHAN)
727 1.38 thorpej jne Lbadsw
728 1.38 thorpej cmpb #SRUN,a0@(P_STAT)
729 1.38 thorpej jne Lbadsw
730 1.38 thorpej #endif
731 1.1 gwr movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
732 1.1 gwr movl a0@(P_FORW),a1 | q = p->p_forw
733 1.1 gwr movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
734 1.1 gwr cmpl a0@(P_FORW),d1 | anyone left on queue?
735 1.1 gwr jeq Lsw2 | no, skip
736 1.39 thorpej movl _C_LABEL(sched_whichqs),d1
737 1.1 gwr bset d0,d1 | yes, reset bit
738 1.39 thorpej movl d1,_C_LABEL(sched_whichqs)
739 1.1 gwr Lsw2:
740 1.41 thorpej /* p->p_cpu initialized in fork1() for single-processor */
741 1.38 thorpej movb #SONPROC,a0@(P_STAT) | p->p_stat = SONPROC
742 1.19 jeremy movl a0,_C_LABEL(curproc)
743 1.19 jeremy clrl _C_LABEL(want_resched)
744 1.1 gwr #ifdef notyet
745 1.1 gwr movl sp@+,a1 | XXX - Make this work!
746 1.1 gwr cmpl a0,a1 | switching to same proc?
747 1.1 gwr jeq Lswdone | yes, skip save and restore
748 1.1 gwr #endif
749 1.1 gwr /*
750 1.1 gwr * Save state of previous process in its pcb.
751 1.1 gwr */
752 1.19 jeremy movl _C_LABEL(curpcb),a1
753 1.1 gwr moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
754 1.1 gwr movl usp,a2 | grab USP (a2 has been saved)
755 1.1 gwr movl a2,a1@(PCB_USP) | and save it
756 1.1 gwr
757 1.19 jeremy tstl _C_LABEL(fputype) | Do we have an fpu?
758 1.1 gwr jeq Lswnofpsave | No? Then don't try save.
759 1.1 gwr lea a1@(PCB_FPCTX),a2 | pointer to FP save area
760 1.1 gwr fsave a2@ | save FP state
761 1.1 gwr tstb a2@ | null state frame?
762 1.1 gwr jeq Lswnofpsave | yes, all done
763 1.1 gwr fmovem fp0-fp7,a2@(FPF_REGS) | save FP general regs
764 1.1 gwr fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control regs
765 1.1 gwr Lswnofpsave:
766 1.1 gwr
767 1.6 gwr /*
768 1.6 gwr * Now that we have saved all the registers that must be
769 1.6 gwr * preserved, we are free to use those registers until
770 1.6 gwr * we load the registers for the switched-to process.
771 1.6 gwr * In this section, keep: a0=curproc, a1=curpcb
772 1.6 gwr */
773 1.6 gwr
774 1.1 gwr clrl a0@(P_BACK) | clear back link
775 1.1 gwr movl a0@(P_ADDR),a1 | get p_addr
776 1.19 jeremy movl a1,_C_LABEL(curpcb)
777 1.1 gwr
778 1.8 gwr /*
779 1.8 gwr * Load the new VM context (new MMU root pointer)
780 1.8 gwr */
781 1.8 gwr movl a0@(P_VMSPACE),a2 | vm = p->p_vmspace
782 1.8 gwr #ifdef DIAGNOSTIC
783 1.20 gwr tstl a2 | vm == VM_MAP_NULL?
784 1.8 gwr jeq Lbadsw | panic
785 1.8 gwr #endif
786 1.8 gwr #ifdef PMAP_DEBUG
787 1.25 gwr /* When debugging just call _pmap_switch(). */
788 1.25 gwr movl a2@(VM_PMAP),a2 | pmap = vm->vm_map.pmap
789 1.25 gwr pea a2@ | push pmap
790 1.25 gwr jbsr _C_LABEL(_pmap_switch) | _pmap_switch(pmap)
791 1.8 gwr addql #4,sp
792 1.19 jeremy movl _C_LABEL(curpcb),a1 | restore p_addr
793 1.8 gwr #else
794 1.25 gwr /* Otherwise, use this inline version. */
795 1.20 gwr lea _C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
796 1.20 gwr movl a2@(VM_PMAP),a2 | pmap = vm->vm_map.pmap
797 1.8 gwr movl a2@(PM_A_PHYS),d0 | phys = pmap->pm_a_phys
798 1.9 jeremy cmpl a3@(4),d0 | == kernel_crp.rp_addr ?
799 1.8 gwr jeq Lsame_mmuctx | skip loadcrp/flush
800 1.8 gwr /* OK, it is a new MMU context. Load it up. */
801 1.9 jeremy movl d0,a3@(4)
802 1.1 gwr movl #CACHE_CLR,d0
803 1.1 gwr movc d0,cacr | invalidate cache(s)
804 1.1 gwr pflusha | flush entire TLB
805 1.8 gwr pmove a3@,crp | load new user root pointer
806 1.8 gwr Lsame_mmuctx:
807 1.8 gwr #endif
808 1.1 gwr
809 1.6 gwr /*
810 1.6 gwr * Reload the registers for the new process.
811 1.6 gwr * After this point we can only use d0,d1,a0,a1
812 1.6 gwr */
813 1.6 gwr moveml a1@(PCB_REGS),#0xFCFC | reload registers
814 1.1 gwr movl a1@(PCB_USP),a0
815 1.1 gwr movl a0,usp | and USP
816 1.1 gwr
817 1.19 jeremy tstl _C_LABEL(fputype) | If we don't have an fpu,
818 1.1 gwr jeq Lres_skip | don't try to restore it.
819 1.1 gwr lea a1@(PCB_FPCTX),a0 | pointer to FP save area
820 1.1 gwr tstb a0@ | null state frame?
821 1.1 gwr jeq Lresfprest | yes, easy
822 1.1 gwr fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
823 1.1 gwr fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
824 1.1 gwr Lresfprest:
825 1.1 gwr frestore a0@ | restore state
826 1.1 gwr Lres_skip:
827 1.1 gwr movw a1@(PCB_PS),d0 | no, restore PS
828 1.1 gwr #ifdef DIAGNOSTIC
829 1.1 gwr btst #13,d0 | supervisor mode?
830 1.1 gwr jeq Lbadsw | no? panic!
831 1.1 gwr #endif
832 1.1 gwr movw d0,sr | OK, restore PS
833 1.1 gwr moveq #1,d0 | return 1 (for alternate returns)
834 1.1 gwr rts
835 1.1 gwr
836 1.1 gwr /*
837 1.1 gwr * savectx(pcb)
838 1.1 gwr * Update pcb, saving current processor state.
839 1.1 gwr */
840 1.1 gwr ENTRY(savectx)
841 1.1 gwr movl sp@(4),a1
842 1.1 gwr movw sr,a1@(PCB_PS)
843 1.1 gwr movl usp,a0 | grab USP
844 1.1 gwr movl a0,a1@(PCB_USP) | and save it
845 1.1 gwr moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
846 1.1 gwr
847 1.19 jeremy tstl _C_LABEL(fputype) | Do we have FPU?
848 1.1 gwr jeq Lsavedone | No? Then don't save state.
849 1.1 gwr lea a1@(PCB_FPCTX),a0 | pointer to FP save area
850 1.1 gwr fsave a0@ | save FP state
851 1.1 gwr tstb a0@ | null state frame?
852 1.1 gwr jeq Lsavedone | yes, all done
853 1.1 gwr fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
854 1.1 gwr fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
855 1.1 gwr Lsavedone:
856 1.1 gwr moveq #0,d0 | return 0
857 1.1 gwr rts
858 1.1 gwr
859 1.20 gwr /* suline() */
860 1.1 gwr
861 1.1 gwr #ifdef DEBUG
862 1.1 gwr .data
863 1.19 jeremy ASGLOBAL(fulltflush)
864 1.1 gwr .long 0
865 1.19 jeremy ASGLOBAL(fullcflush)
866 1.1 gwr .long 0
867 1.1 gwr .text
868 1.1 gwr #endif
869 1.1 gwr
870 1.1 gwr /*
871 1.1 gwr * Invalidate entire TLB.
872 1.1 gwr */
873 1.1 gwr ENTRY(TBIA)
874 1.19 jeremy _C_LABEL(_TBIA):
875 1.1 gwr pflusha
876 1.1 gwr movl #DC_CLEAR,d0
877 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
878 1.1 gwr rts
879 1.1 gwr
880 1.1 gwr /*
881 1.1 gwr * Invalidate any TLB entry for given VA (TB Invalidate Single)
882 1.1 gwr */
883 1.1 gwr ENTRY(TBIS)
884 1.1 gwr #ifdef DEBUG
885 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
886 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush entire TLB
887 1.1 gwr #endif
888 1.1 gwr movl sp@(4),a0
889 1.1 gwr pflush #0,#0,a0@ | flush address from both sides
890 1.1 gwr movl #DC_CLEAR,d0
891 1.1 gwr movc d0,cacr | invalidate on-chip data cache
892 1.1 gwr rts
893 1.1 gwr
894 1.1 gwr /*
895 1.1 gwr * Invalidate supervisor side of TLB
896 1.1 gwr */
897 1.1 gwr ENTRY(TBIAS)
898 1.1 gwr #ifdef DEBUG
899 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
900 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
901 1.1 gwr #endif
902 1.1 gwr pflush #4,#4 | flush supervisor TLB entries
903 1.1 gwr movl #DC_CLEAR,d0
904 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
905 1.1 gwr rts
906 1.1 gwr
907 1.1 gwr /*
908 1.1 gwr * Invalidate user side of TLB
909 1.1 gwr */
910 1.1 gwr ENTRY(TBIAU)
911 1.1 gwr #ifdef DEBUG
912 1.19 jeremy tstl _ASM_LABEL(fulltflush) | being conservative?
913 1.19 jeremy jne _C_LABEL(_TBIA) | yes, flush everything
914 1.1 gwr #endif
915 1.1 gwr pflush #0,#4 | flush user TLB entries
916 1.1 gwr movl #DC_CLEAR,d0
917 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
918 1.1 gwr rts
919 1.1 gwr
920 1.1 gwr /*
921 1.1 gwr * Invalidate instruction cache
922 1.1 gwr */
923 1.1 gwr ENTRY(ICIA)
924 1.1 gwr movl #IC_CLEAR,d0
925 1.1 gwr movc d0,cacr | invalidate i-cache
926 1.1 gwr rts
927 1.1 gwr
928 1.1 gwr /*
929 1.1 gwr * Invalidate data cache.
930 1.1 gwr * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
931 1.1 gwr * problems with DC_WA. The only cases we have to worry about are context
932 1.1 gwr * switch and TLB changes, both of which are handled "in-line" in resume
933 1.1 gwr * and TBI*.
934 1.1 gwr */
935 1.1 gwr ENTRY(DCIA)
936 1.1 gwr __DCIA:
937 1.1 gwr rts
938 1.1 gwr
939 1.1 gwr ENTRY(DCIS)
940 1.1 gwr __DCIS:
941 1.1 gwr rts
942 1.1 gwr
943 1.1 gwr /*
944 1.1 gwr * Invalidate data cache.
945 1.1 gwr */
946 1.1 gwr ENTRY(DCIU)
947 1.11 gwr movl #DC_CLEAR,d0
948 1.11 gwr movc d0,cacr | invalidate on-chip d-cache
949 1.1 gwr rts
950 1.1 gwr
951 1.1 gwr /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
952 1.1 gwr
953 1.1 gwr ENTRY(PCIA)
954 1.1 gwr movl #DC_CLEAR,d0
955 1.1 gwr movc d0,cacr | invalidate on-chip d-cache
956 1.1 gwr rts
957 1.1 gwr
958 1.1 gwr ENTRY(ecacheon)
959 1.1 gwr rts
960 1.1 gwr
961 1.1 gwr ENTRY(ecacheoff)
962 1.1 gwr rts
963 1.1 gwr
964 1.1 gwr /*
965 1.1 gwr * Get callers current SP value.
966 1.1 gwr * Note that simply taking the address of a local variable in a C function
967 1.1 gwr * doesn't work because callee saved registers may be outside the stack frame
968 1.1 gwr * defined by A6 (e.g. GCC generated code).
969 1.20 gwr *
970 1.1 gwr * [I don't think the ENTRY() macro will do the right thing with this -- glass]
971 1.1 gwr */
972 1.19 jeremy GLOBAL(getsp)
973 1.1 gwr movl sp,d0 | get current SP
974 1.1 gwr addql #4,d0 | compensate for return address
975 1.1 gwr rts
976 1.1 gwr
977 1.1 gwr ENTRY(getsfc)
978 1.1 gwr movc sfc,d0
979 1.1 gwr rts
980 1.1 gwr
981 1.1 gwr ENTRY(getdfc)
982 1.1 gwr movc dfc,d0
983 1.1 gwr rts
984 1.1 gwr
985 1.1 gwr ENTRY(getvbr)
986 1.1 gwr movc vbr, d0
987 1.1 gwr rts
988 1.1 gwr
989 1.1 gwr ENTRY(setvbr)
990 1.1 gwr movl sp@(4), d0
991 1.1 gwr movc d0, vbr
992 1.1 gwr rts
993 1.1 gwr
994 1.1 gwr /*
995 1.1 gwr * Load a new CPU Root Pointer (CRP) into the MMU.
996 1.2 gwr * void loadcrp(struct mmu_rootptr *);
997 1.1 gwr */
998 1.1 gwr ENTRY(loadcrp)
999 1.1 gwr movl sp@(4),a0 | arg1: &CRP
1000 1.1 gwr movl #CACHE_CLR,d0
1001 1.1 gwr movc d0,cacr | invalidate cache(s)
1002 1.1 gwr pflusha | flush entire TLB
1003 1.1 gwr pmove a0@,crp | load new user root pointer
1004 1.10 gwr rts
1005 1.10 gwr
1006 1.10 gwr /*
1007 1.10 gwr * Get the physical address of the PTE for a given VA.
1008 1.10 gwr */
1009 1.10 gwr ENTRY(ptest_addr)
1010 1.10 gwr movl sp@(4),a0 | VA
1011 1.10 gwr ptestr #5,a0@,#7,a1 | a1 = addr of PTE
1012 1.10 gwr movl a1,d0
1013 1.1 gwr rts
1014 1.1 gwr
1015 1.1 gwr /*
1016 1.1 gwr * Set processor priority level calls. Most are implemented with
1017 1.1 gwr * inline asm expansions. However, we need one instantiation here
1018 1.1 gwr * in case some non-optimized code makes external references.
1019 1.21 gwr * Most places will use the inlined functions param.h supplies.
1020 1.1 gwr */
1021 1.1 gwr
1022 1.21 gwr ENTRY(_getsr)
1023 1.21 gwr clrl d0
1024 1.21 gwr movw sr,d0
1025 1.21 gwr rts
1026 1.21 gwr
1027 1.1 gwr ENTRY(_spl)
1028 1.1 gwr clrl d0
1029 1.1 gwr movw sr,d0
1030 1.21 gwr movl sp@(4),d1
1031 1.1 gwr movw d1,sr
1032 1.1 gwr rts
1033 1.1 gwr
1034 1.21 gwr ENTRY(_splraise)
1035 1.21 gwr clrl d0
1036 1.21 gwr movw sr,d0
1037 1.21 gwr movl d0,d1
1038 1.21 gwr andl #PSL_HIGHIPL,d1 | old &= PSL_HIGHIPL
1039 1.21 gwr cmpl sp@(4),d1 | (old - new)
1040 1.21 gwr bge Lsplr
1041 1.21 gwr movl sp@(4),d1
1042 1.21 gwr movw d1,sr
1043 1.21 gwr Lsplr:
1044 1.1 gwr rts
1045 1.1 gwr
1046 1.1 gwr /*
1047 1.1 gwr * Save and restore 68881 state.
1048 1.1 gwr */
1049 1.1 gwr ENTRY(m68881_save)
1050 1.1 gwr movl sp@(4),a0 | save area pointer
1051 1.1 gwr fsave a0@ | save state
1052 1.1 gwr tstb a0@ | null state frame?
1053 1.1 gwr jeq Lm68881sdone | yes, all done
1054 1.1 gwr fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1055 1.1 gwr fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1056 1.1 gwr Lm68881sdone:
1057 1.1 gwr rts
1058 1.1 gwr
1059 1.1 gwr ENTRY(m68881_restore)
1060 1.1 gwr movl sp@(4),a0 | save area pointer
1061 1.1 gwr tstb a0@ | null state frame?
1062 1.1 gwr jeq Lm68881rdone | yes, easy
1063 1.1 gwr fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1064 1.1 gwr fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1065 1.1 gwr Lm68881rdone:
1066 1.1 gwr frestore a0@ | restore state
1067 1.1 gwr rts
1068 1.1 gwr
1069 1.1 gwr /*
1070 1.1 gwr * _delay(unsigned N)
1071 1.1 gwr * Delay for at least (N/256) microseconds.
1072 1.1 gwr * This routine depends on the variable: delay_divisor
1073 1.1 gwr * which should be set based on the CPU clock rate.
1074 1.26 gwr * XXX: Currently this is set based on the CPU model,
1075 1.26 gwr * XXX: but this should be determined at run time...
1076 1.1 gwr */
1077 1.19 jeremy GLOBAL(_delay)
1078 1.1 gwr | d0 = arg = (usecs << 8)
1079 1.1 gwr movl sp@(4),d0
1080 1.1 gwr | d1 = delay_divisor;
1081 1.19 jeremy movl _C_LABEL(delay_divisor),d1
1082 1.36 thorpej jra L_delay /* Jump into the loop! */
1083 1.36 thorpej
1084 1.36 thorpej /*
1085 1.36 thorpej * Align the branch target of the loop to a half-line (8-byte)
1086 1.36 thorpej * boundary to minimize cache effects. This guarantees both
1087 1.36 thorpej * that there will be no prefetch stalls due to cache line burst
1088 1.36 thorpej * operations and that the loop will run from a single cache
1089 1.36 thorpej * half-line.
1090 1.36 thorpej */
1091 1.36 thorpej .align 8
1092 1.1 gwr L_delay:
1093 1.1 gwr subl d1,d0
1094 1.1 gwr jgt L_delay
1095 1.1 gwr rts
1096 1.1 gwr
1097 1.1 gwr
1098 1.1 gwr | Define some addresses, mostly so DDB can print useful info.
1099 1.24 gwr | Not using _C_LABEL() here because these symbols are never
1100 1.24 gwr | referenced by any C code, and if the leading underscore
1101 1.24 gwr | ever goes away, these lines turn into syntax errors...
1102 1.24 gwr .set _KERNBASE,KERNBASE
1103 1.26 gwr .set _MONSTART,SUN3X_MONSTART
1104 1.26 gwr .set _PROM_BASE,SUN3X_PROM_BASE
1105 1.26 gwr .set _MONEND,SUN3X_MONEND
1106 1.1 gwr
1107 1.1 gwr |The end!
1108