1 /* $NetBSD: locore.s,v 1.2 1997/01/17 16:27:18 gwr Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 */
43
44 #include "assym.h"
45 #include <machine/trap.h>
46
47 | Remember this is a fun project!
48
49 .data
50 .globl _mon_crp
51 _mon_crp:
52 .long 0,0
53
54 | This is for kvm_mkdb, and should be the address of the beginning
55 | of the kernel text segment (not necessarily the same as kernbase).
56 .text
57 .globl _kernel_text
58 _kernel_text:
59
60 | This is the entry point, as well as the end of the temporary stack
61 | used during process switch (one 8K page ending at start)
62 .globl tmpstk
63 tmpstk:
64 .globl start
65 start:
66 | The first step, after disabling interrupts, is to map enough of the kernel
67 | into high virtual address space so that we can use position dependent code.
68 | This is a tricky task on the sun3x because the MMU is already enabled and
69 | the ROM monitor provides no indication of where the root MMU table is mapped.
70 | Therefore we must use one of the 68030's 'transparent translation' registers
71 | to define a range in the address space where the MMU translation is
72 | turned off. Once this is complete we can modify the MMU table directly
73 | without the need for it to be mapped into virtual memory.
74 | All code must be position independent until otherwise noted, as the
75 | boot loader has loaded us into low memory but all the symbols in this
76 | code have been linked high.
77 movw #PSL_HIGHIPL, sr | no interrupts
78 movl #KERNBASE, a5 | for vtop conversion
79 lea _mon_crp, a0 | where to store the CRP
80 subl a5, a0
81 | Note: borrowing mon_crp for tt0 setup...
82 movl #0x3F8107, a0@ | map the low 1GB v=p with the
83 pmove a0@, tt0 | transparent translation reg0
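| As a rough sketch (not a definitive decode), the 0x3F8107 value above
| sets the 68030 TT0 fields to: base 0x00, mask 0x3F (match the low 1GB,
| 0x00000000-0x3FFFFFFF), E=1 (enable), RWM=1 (match reads and writes),
| and FC mask 7 (any function code), giving a v=p window over low memory.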
84
85 | In order to map the kernel into high memory we will copy the root table
86 | entry which maps the 16 megabytes of memory starting at 0x0 into the
87 | entry which maps the 16 megabytes starting at KERNBASE.
88 pmove crp, a0@ | Get monitor CPU root pointer
89 movl a0@(4), a1 | 2nd word is PA of level A table
90
91 movl a1, a0 | compute the descriptor address
92 addl #0x3e0, a1 | for VA starting at KERNBASE
93 movl a0@, a1@ | copy descriptor type
94 movl a0@(4), a1@(4) | copy physical address
95
96 | Kernel is now double mapped at zero and KERNBASE.
97 | Force a long jump to the relocated code (high VA).
98 movl #IC_CLEAR, d0 | Flush the I-cache
99 movc d0, cacr
100 jmp L_high_code:l | long jump
101
102 L_high_code:
103 | We are now running in the correctly relocated kernel, so
104 | we are no longer restricted to position-independent code.
105 | It is handy to leave transparent translation enabled for the
106 | low 1GB while __bootstrap() is doing its thing.
107
108 | Do bootstrap stuff needed before main() gets called.
109 | Our boot loader leaves a copy of the kernel's exec header
110 | just before the start of the kernel text segment, so the
111 | kernel can sanity-check the DDB symbols at [end...esym].
112 | Pass the struct exec at tmpstk-32 to __bootstrap().
113 lea tmpstk-32, sp
114 jsr __bootstrap | See _startup.c
115
116 | Now turn off the transparent translation of the low 1GB.
117 | (this also flushes the ATC)
118 clrl sp@-
119 pmove sp@,tt0
120 addql #4,sp
121
122 | Now that __bootstrap() is done using the PROM functions,
123 | we can safely set the sfc/dfc to something != FC_CONTROL
124 moveq #FC_USERD, d0 | make movs access "user data"
125 movc d0, sfc | space for copyin/copyout
126 movc d0, dfc
127
128 | Setup process zero user/kernel stacks.
129 movl _proc0paddr,a1 | get proc0 pcb addr
130 lea a1@(USPACE-4),sp | set SSP to last word
131 movl #USRSTACK-4,a2
132 movl a2,usp | init user SP
133
134 | Note curpcb was already set in __bootstrap().
135 | Will do fpu initialization during autoconfig (see fpu.c)
136 | The interrupt vector table and stack are now ready.
137 | Interrupts will be enabled later, AFTER autoconfiguration
138 | is finished, to avoid spurious interrupts.
139
140 /*
141 * Final preparation for calling main.
142 *
143 * Create a fake exception frame that returns to user mode,
144 * and save its address in p->p_md.md_regs for cpu_fork().
145 * The new frames for process 1 and 2 will be adjusted by
146 * cpu_set_kpc() to arrange for a call to a kernel function
147 * before the new process does its rte out to user mode.
148 */
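/*
 * A sketch of the frame built below (offsets are illustrative, derived
 * from the pushes that follow; FR_SP is assumed to be the saved-SP slot):
 *	sp+0..63	D0-D7/A0-A7 save area (user SP stored at FR_SP)
 *	sp+64		stack adjust count and padding (cleared)
 *	sp+68		PSW = PSL_USER
 *	sp+70		PC (filled in later by execve)
 *	sp+74		format/vector offset (cleared)
 */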
149 clrw sp@- | vector offset/frame type
150 clrl sp@- | PC - filled in by "execve"
151 movw #PSL_USER,sp@- | in user mode
152 clrl sp@- | stack adjust count and padding
153 lea sp@(-64),sp | construct space for D0-D7/A0-A7
154 lea _proc0,a0 | proc0 in a0
155 movl sp,a0@(P_MDREGS) | save frame for proc0
156 movl usp,a1
157 movl a1,sp@(FR_SP) | save user stack pointer in frame
158 jbsr _main | main()
159 trap #15 | should not get here
160
161 | This is used by cpu_fork() to return to user mode.
162 | It is called with SP pointing to a struct trapframe.
163 .globl _proc_do_uret
164 _proc_do_uret:
165 movl sp@(FR_SP),a0 | grab and load
166 movl a0,usp | user SP
167 moveml sp@+,#0x7FFF | load most registers (all but SSP)
168 addql #8,sp | pop SSP and stack adjust count
169 rte
170
171 /*
172 * proc_trampoline:
173 * This is used by cpu_set_kpc() to "push" a function call onto the
174 * kernel stack of some process, very much like a signal delivery.
175 * When we get here, the stack has:
176 *
177 * SP+8: switchframe from before cpu_set_kpc
178 * SP+4: void *proc;
179 * SP: u_long func;
180 *
181 * On entry, the switchframe pushed by cpu_set_kpc has already been
182 * popped off the stack, so all this needs to do is pop the function
183 * pointer into a register, call it, then pop the arg, and finally
184 * return using the switchframe that remains on the stack.
185 */
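/*
 * In rough pseudocode (illustrative only):
 *	func = pop();
 *	(*func)(proc);		-- proc is still on the stack as the argument
 *	pop();			-- discard the argument
 *	rts;			-- return through the remaining switchframe
 */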
186 .globl _proc_trampoline
187 _proc_trampoline:
188 movl sp@+,a0 | function pointer
189 jbsr a0@ | (*func)(procp)
190 addql #4,sp | toss the arg
191 rts | as cpu_switch would do
192
193 | That is all the assembly startup code we need on the sun3x!
194 | The rest of this is like the hp300/locore.s where possible.
195
196 /*
197 * Trap/interrupt vector routines
198 */
199
200 .globl _buserr, _addrerr, _illinst, _zerodiv, _chkinst
201 .globl _trapvinst, _privinst, _trace, _badtrap, _fmterr
202 .globl _trap0, _trap1, _trap2, _trap12, _trap15
203 .globl _coperr, _fpfline, _fpunsupp
204
205 .globl _trap, _nofault, _longjmp
206 _buserr:
207 tstl _nofault | device probe?
208 jeq _addrerr | no, handle as usual
209 movl _nofault,sp@- | yes,
210 jbsr _longjmp | longjmp(nofault)
211 _addrerr:
212 clrl sp@- | stack adjust count
213 moveml #0xFFFF,sp@- | save user registers
214 movl usp,a0 | save the user SP
215 movl a0,sp@(FR_SP) | in the savearea
216 lea sp@(FR_HW),a1 | grab base of HW berr frame
217 moveq #0,d0
218 movw a1@(10),d0 | grab SSW for fault processing
219 btst #12,d0 | RB set?
220 jeq LbeX0 | no, test RC
221 bset #14,d0 | yes, must set FB
222 movw d0,a1@(10) | for hardware too
223 LbeX0:
224 btst #13,d0 | RC set?
225 jeq LbeX1 | no, skip
226 bset #15,d0 | yes, must set FC
227 movw d0,a1@(10) | for hardware too
228 LbeX1:
229 btst #8,d0 | data fault?
230 jeq Lbe0 | no, check for hard cases
231 movl a1@(16),d1 | fault address is as given in frame
232 jra Lbe10 | that's it
233 Lbe0:
234 btst #4,a1@(6) | long (type B) stack frame?
235 jne Lbe4 | yes, go handle
236 movl a1@(2),d1 | no, can use save PC
237 btst #14,d0 | FB set?
238 jeq Lbe3 | no, try FC
239 addql #4,d1 | yes, adjust address
240 jra Lbe10 | done
241 Lbe3:
242 btst #15,d0 | FC set?
243 jeq Lbe10 | no, done
244 addql #2,d1 | yes, adjust address
245 jra Lbe10 | done
246 Lbe4:
247 movl a1@(36),d1 | long format, use stage B address
248 btst #15,d0 | FC set?
249 jeq Lbe10 | no, all done
250 subql #2,d1 | yes, adjust address
251 Lbe10:
252 movl d1,sp@- | push fault VA
253 movl d0,sp@- | and padded SSW
254 movw a1@(6),d0 | get frame format/vector offset
255 andw #0x0FFF,d0 | clear out frame format
256 cmpw #12,d0 | address error vector?
257 jeq Lisaerr | yes, go to it
258
259 /* MMU-specific code to determine reason for bus error. */
260 movl d1,a0 | fault address
261 movl sp@,d0 | function code from ssw
262 btst #8,d0 | data fault?
263 jne Lbe10a
264 movql #1,d0 | user program access FC
265 | (we don't separate data/program)
266 btst #5,a1@ | supervisor mode?
267 jeq Lbe10a | if no, done
268 movql #5,d0 | else supervisor program access
269 Lbe10a:
270 ptestr d0,a0@,#7 | do a table search
271 pmove psr,sp@ | save result
272 movb sp@,d1
273 btst #2,d1 | invalid? (incl. limit viol and berr)
274 jeq Lmightnotbemerr | no -> wp check
275 btst #7,d1 | is it MMU table berr?
276 jeq Lismerr | no, must be fast
277 jra Lisberr1 | real bus err needs not be fast
278 Lmightnotbemerr:
279 btst #3,d1 | write protect bit set?
280 jeq Lisberr1 | no, must be bus error
281 movl sp@,d0 | ssw into low word of d0
282 andw #0xc0,d0 | write protect is set on page:
283 cmpw #0x40,d0 | was it read cycle?
284 jeq Lisberr1 | yes, was not WPE, must be bus err
285 /* End of MMU-specific bus error code. */
286
287 Lismerr:
288 movl #T_MMUFLT,sp@- | show that we are an MMU fault
289 jra Ltrapnstkadj | and deal with it
290 Lisaerr:
291 movl #T_ADDRERR,sp@- | mark address error
292 jra Ltrapnstkadj | and deal with it
293 Lisberr1:
294 clrw sp@ | re-clear pad word
295 Lisberr:
296 movl #T_BUSERR,sp@- | mark bus error
297 Ltrapnstkadj:
298 jbsr _trap | handle the error
299 lea sp@(12),sp | pop value args
300 movl sp@(FR_SP),a0 | restore user SP
301 movl a0,usp | from save area
302 movw sp@(FR_ADJ),d0 | need to adjust stack?
303 jne Lstkadj | yes, go to it
304 moveml sp@+,#0x7FFF | no, restore most user regs
305 addql #8,sp | toss SSP and stkadj
306 jra rei | all done
307 Lstkadj:
308 lea sp@(FR_HW),a1 | pointer to HW frame
309 addql #8,a1 | source pointer
310 movl a1,a0 | source
311 addw d0,a0 | + hole size = dest pointer
312 movl a1@-,a0@- | copy
313 movl a1@-,a0@- | 8 bytes
314 movl a0,sp@(FR_SP) | new SSP
315 moveml sp@+,#0x7FFF | restore user registers
316 movl sp@,sp | and our SP
317 jra rei | all done
318
319 /*
320 * FP exceptions.
321 */
322 _fpfline:
323 clrl sp@- | stack adjust count
324 moveml #0xFFFF,sp@- | save registers
325 moveq #T_FPEMULI,d0 | denote as FP emulation trap
326 jra fault | do it
327
328 _fpunsupp:
329 clrl sp@- | stack adjust count
330 moveml #0xFFFF,sp@- | save registers
331 moveq #T_FPEMULD,d0 | denote as FP emulation trap
332 jra fault | do it
333
334 /*
335 * Handles all other FP coprocessor exceptions.
336 * Note that since some FP exceptions generate mid-instruction frames
337 * and may cause signal delivery, we need to test for stack adjustment
338 * after the trap call.
339 */
340 .globl _fpfault
341 _fpfault:
342 clrl sp@- | stack adjust count
343 moveml #0xFFFF,sp@- | save user registers
344 movl usp,a0 | and save
345 movl a0,sp@(FR_SP) | the user stack pointer
346 clrl sp@- | no VA arg
347 movl _curpcb,a0 | current pcb
348 lea a0@(PCB_FPCTX),a0 | address of FP savearea
349 fsave a0@ | save state
350 tstb a0@ | null state frame?
351 jeq Lfptnull | yes, safe
352 clrw d0 | no, need to tweak BIU
353 movb a0@(1),d0 | get frame size
354 bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
355 Lfptnull:
356 fmovem fpsr,sp@- | push fpsr as code argument
357 frestore a0@ | restore state
358 movl #T_FPERR,sp@- | push type arg
359 jra Ltrapnstkadj | call trap and deal with stack cleanup
360
361 /*
362 * Coprocessor and format errors can generate mid-instruction stack
363 * frames and cause signal delivery hence we need to check for potential
364 * stack adjustment.
365 */
366 _coperr:
367 clrl sp@- | stack adjust count
368 moveml #0xFFFF,sp@-
369 movl usp,a0 | get and save
370 movl a0,sp@(FR_SP) | the user stack pointer
371 clrl sp@- | no VA arg
372 clrl sp@- | or code arg
373 movl #T_COPERR,sp@- | push trap type
374 jra Ltrapnstkadj | call trap and deal with stack adjustments
375
376 _fmterr:
377 clrl sp@- | stack adjust count
378 moveml #0xFFFF,sp@-
379 movl usp,a0 | get and save
380 movl a0,sp@(FR_SP) | the user stack pointer
381 clrl sp@- | no VA arg
382 clrl sp@- | or code arg
383 movl #T_FMTERR,sp@- | push trap type
384 jra Ltrapnstkadj | call trap and deal with stack adjustments
385
386 /*
387 * Other exceptions only cause four and six word stack frames and require
388 * no post-trap stack adjustment.
389 */
390 _illinst:
391 clrl sp@-
392 moveml #0xFFFF,sp@-
393 moveq #T_ILLINST,d0
394 jra fault
395
396 _zerodiv:
397 clrl sp@-
398 moveml #0xFFFF,sp@-
399 moveq #T_ZERODIV,d0
400 jra fault
401
402 _chkinst:
403 clrl sp@-
404 moveml #0xFFFF,sp@-
405 moveq #T_CHKINST,d0
406 jra fault
407
408 _trapvinst:
409 clrl sp@-
410 moveml #0xFFFF,sp@-
411 moveq #T_TRAPVINST,d0
412 jra fault
413
414 _privinst:
415 clrl sp@-
416 moveml #0xFFFF,sp@-
417 moveq #T_PRIVINST,d0
418 jra fault
419
420 .globl fault
421 fault:
422 movl usp,a0 | get and save
423 movl a0,sp@(FR_SP) | the user stack pointer
424 clrl sp@- | no VA arg
425 clrl sp@- | or code arg
426 movl d0,sp@- | push trap type
427 jbsr _trap | handle trap
428 lea sp@(12),sp | pop value args
429 movl sp@(FR_SP),a0 | restore
430 movl a0,usp | user SP
431 moveml sp@+,#0x7FFF | restore most user regs
432 addql #8,sp | pop SP and stack adjust
433 jra rei | all done
434
435 .globl _straytrap
436 _badtrap:
437 clrl sp@- | stack adjust count
438 moveml #0xFFFF,sp@- | save std frame regs
439 jbsr _straytrap | report
440 moveml sp@+,#0xFFFF | restore regs
441 addql #4, sp | stack adjust count
442 jra rei | all done
443
444 /*
445 * Trap 0 is for system calls
446 */
447 .globl _syscall
448 _trap0:
449 clrl sp@- | stack adjust count
450 moveml #0xFFFF,sp@- | save user registers
451 movl usp,a0 | save the user SP
452 movl a0,sp@(FR_SP) | in the savearea
453 movl d0,sp@- | push syscall number
454 jbsr _syscall | handle it
455 addql #4,sp | pop syscall arg
456 movl sp@(FR_SP),a0 | grab and restore
457 movl a0,usp | user SP
458 moveml sp@+,#0x7FFF | restore most registers
459 addql #8,sp | pop SP and stack adjust
460 jra rei | all done
461
462 /*
463 * Trap 1 is either:
464 * sigreturn (native NetBSD executable)
465 * breakpoint (HPUX executable)
466 */
467 _trap1:
468 #if 0 /* COMPAT_HPUX */
469 /* If process is HPUX, this is a user breakpoint. */
470 jne trap15 | breakpoint
471 #endif
472 /* fall into sigreturn */
473
474 /*
475 * The sigreturn() syscall comes here. It requires special handling
476 * because we must open a hole in the stack to fill in the (possibly much
477 * larger) original stack frame.
478 */
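/*
 * Size sketch (derived from the code below): an 84-byte hole plus the
 * existing 8-byte exception frame accounts for the 92 bytes used as the
 * "total size" when the frame is compacted again at Lsigrlp.
 */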
479 sigreturn:
480 lea sp@(-84),sp | leave enough space for largest frame
481 movl sp@(84),sp@ | move up current 8 byte frame
482 movl sp@(88),sp@(4)
483 movl #84,sp@- | default: adjust by 84 bytes
484 moveml #0xFFFF,sp@- | save user registers
485 movl usp,a0 | save the user SP
486 movl a0,sp@(FR_SP) | in the savearea
487 movl #SYS_sigreturn,sp@- | push syscall number
488 jbsr _syscall | handle it
489 addql #4,sp | pop syscall#
490 movl sp@(FR_SP),a0 | grab and restore
491 movl a0,usp | user SP
492 lea sp@(FR_HW),a1 | pointer to HW frame
493 movw sp@(FR_ADJ),d0 | do we need to adjust the stack?
494 jeq Lsigr1 | no, just continue
495 moveq #92,d1 | total size
496 subw d0,d1 | - hole size = frame size
497 lea a1@(92),a0 | destination
498 addw d1,a1 | source
499 lsrw #1,d1 | convert to word count
500 subqw #1,d1 | minus 1 for dbf
501 Lsigrlp:
502 movw a1@-,a0@- | copy a word
503 dbf d1,Lsigrlp | continue
504 movl a0,a1 | new HW frame base
505 Lsigr1:
506 movl a1,sp@(FR_SP) | new SP value
507 moveml sp@+,#0x7FFF | restore user registers
508 movl sp@,sp | and our SP
509 jra rei | all done
510
511 /*
512 * Trap 2 is one of:
513 * NetBSD: not used (ignore)
514 * SunOS: Some obscure FPU operation
515 * HPUX: sigreturn
516 */
517 _trap2:
518 #if 0 /* COMPAT_HPUX */
519 /* XXX: If HPUX, this is sigreturn. */
520 jne sigreturn
521 #endif
522 /* fall into trace (NetBSD or SunOS) */
523
524 /*
525 * Trace (single-step) trap. Kernel-mode is special.
526 * User mode traps are simply passed on to trap().
527 */
528 _trace:
529 clrl sp@- | stack adjust count
530 moveml #0xFFFF,sp@-
531 moveq #T_TRACE,d0
532 movw sp@(FR_HW),d1 | get PSW
533 andw #PSL_S,d1 | from system mode?
534 jne kbrkpt | yes, kernel breakpoint
535 jra fault | no, user-mode fault
536
537 /*
538 * Trap 15 is used for:
539 * - GDB breakpoints (in user programs)
540 * - KGDB breakpoints (in the kernel)
541 * - trace traps for SUN binaries (not fully supported yet)
542 * User mode traps are simply passed to trap().
543 */
544 _trap15:
545 clrl sp@- | stack adjust count
546 moveml #0xFFFF,sp@-
547 moveq #T_TRAP15,d0
548 movw sp@(FR_HW),d1 | get PSW
549 andw #PSL_S,d1 | from system mode?
550 jne kbrkpt | yes, kernel breakpoint
551 jra fault | no, user-mode fault
552
553 kbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
554 | Save the system sp rather than the user sp.
555 movw #PSL_HIGHIPL,sr | lock out interrupts
556 lea sp@(FR_SIZE),a6 | Save stack pointer
557 movl a6,sp@(FR_SP) | from before trap
558
559 | If we are not on tmpstk switch to it.
560 | (so debugger can change the stack pointer)
561 movl a6,d1
562 cmpl #tmpstk,d1
563 jls Lbrkpt2 | already on tmpstk
564 | Copy frame to the temporary stack
565 movl sp,a0 | a0=src
566 lea tmpstk-96,a1 | a1=dst
567 movl a1,sp | sp=new frame
568 moveq #FR_SIZE,d1
569 Lbrkpt1:
570 movl a0@+,a1@+
571 subql #4,d1
572 bgt Lbrkpt1
573
574 Lbrkpt2:
575 | Call the trap handler for the kernel debugger.
576 | Do not call trap() to do it, so that we can
577 | set breakpoints in trap() if we want. We know
578 | the trap type is either T_TRACE or T_BREAKPOINT.
579 | If we have both DDB and KGDB, let KGDB see it first,
580 | because KGDB will just return 0 if not connected.
581 | Save args in d2, a2
582 movl d0,d2 | trap type
583 movl sp,a2 | frame ptr
584 #ifdef KGDB
585 | Let KGDB handle it (if connected)
586 movl a2,sp@- | push frame ptr
587 movl d2,sp@- | push trap type
588 jbsr _kgdb_trap | handle the trap
589 addql #8,sp | pop args
590 cmpl #0,d0 | did kgdb handle it
591 jne Lbrkpt3 | yes, done
592 #endif
593 #ifdef DDB
594 | Let DDB handle it.
595 movl a2,sp@- | push frame ptr
596 movl d2,sp@- | push trap type
597 jbsr _kdb_trap | handle the trap
598 addql #8,sp | pop args
599 cmpl #0,d0 | did ddb handle it
600 jne Lbrkpt3 | yes, done
601 #endif
602 | Drop into the PROM temporarily...
603 movl a2,sp@- | push frame ptr
604 movl d2,sp@- | push trap type
605 jbsr _nodb_trap | handle the trap
606 addql #8,sp | pop args
607 Lbrkpt3:
608 | The stack pointer may have been modified, or
609 | data below it modified (by kgdb push call),
610 | so push the hardware frame at the current sp
611 | before restoring registers and returning.
612
613 movl sp@(FR_SP),a0 | modified sp
614 lea sp@(FR_SIZE),a1 | end of our frame
615 movl a1@-,a0@- | copy 2 longs with
616 movl a1@-,a0@- | ... predecrement
617 movl a0,sp@(FR_SP) | sp = h/w frame
618 moveml sp@+,#0x7FFF | restore all but sp
619 movl sp@,sp | ... and sp
620 rte | all done
621
622 /*
623 * Trap 12 is the entry point for the cachectl "syscall"
624 * cachectl(command, addr, length)
625 * command in d0, addr in a1, length in d1
626 */
627 .globl _cachectl
628 _trap12:
629 movl d1,sp@- | push length
630 movl a1,sp@- | push addr
631 movl d0,sp@- | push command
632 jbsr _cachectl | do it
633 lea sp@(12),sp | pop args
634 jra rei | all done
635
636 /*
637 * Interrupt handlers. Most are auto-vectored,
638 * and hard-wired the same way on all sun3 models.
639 * Format in the stack is:
640 * d0,d1,a0,a1, sr, pc, vo
641 */
642
643 #define INTERRUPT_SAVEREG \
644 moveml #0xC0C0,sp@-
645
646 #define INTERRUPT_RESTORE \
647 moveml sp@+,#0x0303
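/*
 * A note on the masks (moveml encoding): with predecrement (push) the
 * register mask is bit-reversed, so 0xC0C0 selects d0-d1/a0-a1; with
 * postincrement (pop) the same four registers are 0x0303.
 */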
648
649 /*
650 * This is the common auto-vector interrupt handler,
651 * for which the CPU provides the vector=0x18+level.
652 * These are installed in the interrupt vector table.
653 */
654 .align 2
655 .globl __isr_autovec, _isr_autovec
656 __isr_autovec:
657 INTERRUPT_SAVEREG
658 jbsr _isr_autovec
659 INTERRUPT_RESTORE
660 jra rei
661
662 /* clock: see clock.c */
663 .align 2
664 .globl __isr_clock, _clock_intr
665 __isr_clock:
666 INTERRUPT_SAVEREG
667 jbsr _clock_intr
668 INTERRUPT_RESTORE
669 jra rei
670
671 | Handler for all vectored interrupts (i.e. VME interrupts)
672 .align 2
673 .globl __isr_vectored, _isr_vectored
674 __isr_vectored:
675 INTERRUPT_SAVEREG
676 jbsr _isr_vectored
677 INTERRUPT_RESTORE
678 jra rei
679
680 #undef INTERRUPT_SAVEREG
681 #undef INTERRUPT_RESTORE
682
683 /* interrupt counters (needed by vmstat) */
684 .globl _intrcnt,_eintrcnt,_intrnames,_eintrnames
685 _intrnames:
686 .asciz "spur" | 0
687 .asciz "lev1" | 1
688 .asciz "lev2" | 2
689 .asciz "lev3" | 3
690 .asciz "lev4" | 4
691 .asciz "clock" | 5
692 .asciz "lev6" | 6
693 .asciz "nmi" | 7
694 _eintrnames:
695
696 .data
697 .even
698 _intrcnt:
699 .long 0,0,0,0,0,0,0,0,0,0
700 _eintrcnt:
701 .text
702
703 /*
704 * Emulation of VAX REI instruction.
705 *
706 * This code is (mostly) un-altered from the hp300 code,
707 * except that sun machines do not need a simulated SIR
708 * because they have a real software interrupt register.
709 *
710 * This code deals with checking for and servicing ASTs
711 * (profiling, scheduling) and software interrupts (network, softclock).
712 * We check for ASTs first, just like the VAX. To avoid excess overhead
713 * the T_ASTFLT handling code will also check for software interrupts so we
714 * do not have to do it here. After identifying that we need an AST we
715 * drop the IPL to allow device interrupts.
716 *
717 * This code is complicated by the fact that sendsig may have been called
718 * necessitating a stack cleanup.
719 */
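/*
 * Rough pseudocode of the checks below (illustrative only):
 *
 *	if (!astpending || !returning_to_user(frame))
 *		rte();
 *	lower spl; build a T_ASTFLT frame; trap();
 *	if (sendsig adjusted the stack) compact the frame;
 *	rte();
 */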
720
721 .globl _astpending
722 .globl rei
723 rei:
724 #ifdef DIAGNOSTIC
725 tstl _panicstr | have we panicked?
726 jne Ldorte | yes, do not make matters worse
727 #endif
728 tstl _astpending | AST pending?
729 jeq Ldorte | no, done
730 Lrei1:
731 btst #5,sp@ | yes, are we returning to user mode?
732 jne Ldorte | no, done
733 movw #PSL_LOWIPL,sr | lower SPL
734 clrl sp@- | stack adjust
735 moveml #0xFFFF,sp@- | save all registers
736 movl usp,a1 | including
737 movl a1,sp@(FR_SP) | the users SP
738 clrl sp@- | VA == none
739 clrl sp@- | code == none
740 movl #T_ASTFLT,sp@- | type == async system trap
741 jbsr _trap | go handle it
742 lea sp@(12),sp | pop value args
743 movl sp@(FR_SP),a0 | restore user SP
744 movl a0,usp | from save area
745 movw sp@(FR_ADJ),d0 | need to adjust stack?
746 jne Laststkadj | yes, go to it
747 moveml sp@+,#0x7FFF | no, restore most user regs
748 addql #8,sp | toss SP and stack adjust
749 rte | and do real RTE
750 Laststkadj:
751 lea sp@(FR_HW),a1 | pointer to HW frame
752 addql #8,a1 | source pointer
753 movl a1,a0 | source
754 addw d0,a0 | + hole size = dest pointer
755 movl a1@-,a0@- | copy
756 movl a1@-,a0@- | 8 bytes
757 movl a0,sp@(FR_SP) | new SSP
758 moveml sp@+,#0x7FFF | restore user registers
759 movl sp@,sp | and our SP
760 Ldorte:
761 rte | real return
762
763 /*
764 * Initialization is at the beginning of this file, because the
765 * kernel entry point needs to be at zero for compatibility with
766 * the Sun boot loader. This works on Sun machines because the
767 * interrupt vector table for reset is NOT at address zero.
768 * (The MMU has a "boot" bit that forces access to the PROM)
769 */
770
771 /*
772 * Signal "trampoline" code (18 bytes). Invoked from RTE setup by sendsig().
773 *
774 * Stack looks like:
775 *
776 * sp+0 -> signal number
777 * sp+4 signal specific code
778 * sp+8 pointer to signal context frame (scp)
779 * sp+12 address of handler
780 * sp+16 saved hardware state
781 * .
782 * .
783 * scp+0-> beginning of signal context frame
784 */
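/*
 * What the trampoline below amounts to (an illustrative C-like sketch):
 *
 *	(*handler)(sig, code, scp);
 *	sigreturn(scp);		-- via the special trap #1 entry
 *	exit(errno);		-- only reached if sigreturn fails
 */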
785 .globl _sigcode, _esigcode
786 .data
787 .align 2
788 _sigcode: /* Found at address: 0x0DFFffdc */
789 movl sp@(12),a0 | signal handler addr (4 bytes)
790 jsr a0@ | call signal handler (2 bytes)
791 addql #4,sp | pop signo (2 bytes)
792 trap #1 | special syscall entry (2 bytes)
793 movl d0,sp@(4) | save errno (4 bytes)
794 moveq #1,d0 | syscall == exit (2 bytes)
795 trap #0 | exit(errno) (2 bytes)
796 .align 2
797 _esigcode:
798 .text
799
800 /* XXX - hp300 still has icode here... */
801
802 /*
803 * Primitives
804 */
805 #include <machine/asm.h>
806
807 /* XXX copypage(fromaddr, toaddr) */
808
809 /*
810 * non-local gotos
811 */
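/*
 * Typical in-kernel use, as in the nofault device probe in _buserr above
 * (a sketch; the buffer type and probe helper shown are illustrative):
 *
 *	label_t faultbuf;
 *	nofault = (int *) &faultbuf;
 *	if (setjmp(&faultbuf)) {
 *		nofault = NULL;		-- the probe faulted
 *		return (0);
 *	}
 *	(void) peek_at_device_register();
 *	nofault = NULL;			-- the probe succeeded
 */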
812 ENTRY(setjmp)
813 movl sp@(4),a0 | savearea pointer
814 moveml #0xFCFC,a0@ | save d2-d7/a2-a7
815 movl sp@,a0@(48) | and return address
816 moveq #0,d0 | return 0
817 rts
818
819 ENTRY(longjmp)
820 movl sp@(4),a0
821 moveml a0@+,#0xFCFC
822 movl a0@,sp@
823 moveq #1,d0
824 rts
825
826 /*
827 * The following primitives manipulate the run queues.
828 * _whichqs tells which of the 32 queues _qs have processes in them.
829 * Setrunqueue puts processes into queues, Remrunqueue removes them
830 * from queues. The running process is on no queue, other processes
831 * are on a queue indexed by p->p_priority / 4, which folds the
832 * 0-127 range of priorities into the 32 available queues.
833 */
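/*
 * Worked example (illustrative): a process with p_priority 50 maps to
 * queue 50 >> 2 == 12; each queue head is two longs (forw/back), so its
 * head lives at _qs + 12*8 and bit 12 of _whichqs marks it non-empty.
 */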
834
835 .globl _whichqs,_qs,_cnt,_panic
836 .globl _curproc
837 .comm _want_resched,4
838
839 /*
840 * setrunqueue(p)
841 *
842 * Call should be made at splclock(), and p->p_stat should be SRUN
843 */
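/*
 * Roughly equivalent C (a sketch only; names follow the offsets used below):
 *
 *	i = p->p_priority >> 2;
 *	whichqs |= 1 << i;
 *	q = &qs[i];
 *	p->p_forw = (struct proc *)q;	-- tail insert into circular queue
 *	p->p_back = q->p_back;
 *	q->p_back = p;
 *	p->p_back->p_forw = p;
 */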
844 ENTRY(setrunqueue)
845 movl sp@(4),a0
846 #ifdef DIAGNOSTIC
847 tstl a0@(P_BACK)
848 jne Lset1
849 tstl a0@(P_WCHAN)
850 jne Lset1
851 cmpb #SRUN,a0@(P_STAT)
852 jne Lset1
853 #endif
854 clrl d0
855 movb a0@(P_PRIORITY),d0
856 lsrb #2,d0
857 movl _whichqs,d1
858 bset d0,d1
859 movl d1,_whichqs
860 lslb #3,d0
861 addl #_qs,d0
862 movl d0,a0@(P_FORW)
863 movl d0,a1
864 movl a1@(P_BACK),a0@(P_BACK)
865 movl a0,a1@(P_BACK)
866 movl a0@(P_BACK),a1
867 movl a0,a1@(P_FORW)
868 rts
869 #ifdef DIAGNOSTIC
870 Lset1:
871 movl #Lset2,sp@-
872 jbsr _panic
873 Lset2:
874 .asciz "setrunqueue"
875 .even
876 #endif
877
878 /*
879 * remrunqueue(p)
880 *
881 * Call should be made at splclock().
882 */
883 ENTRY(remrunqueue)
884 movl sp@(4),a0 | proc *p
885 clrl d0
886 movb a0@(P_PRIORITY),d0
887 lsrb #2,d0
888 movl _whichqs,d1
889 bclr d0,d1 | if ((d1 & (1 << d0)) == 0)
890 jeq Lrem2 | panic (empty queue)
891 movl d1,_whichqs
892 movl a0@(P_FORW),a1
893 movl a0@(P_BACK),a1@(P_BACK)
894 movl a0@(P_BACK),a1
895 movl a0@(P_FORW),a1@(P_FORW)
896 movl #_qs,a1
897 movl d0,d1
898 lslb #3,d1
899 addl d1,a1
900 cmpl a1@(P_FORW),a1
901 jeq Lrem1
902 movl _whichqs,d1
903 bset d0,d1
904 movl d1,_whichqs
905 Lrem1:
906 clrl a0@(P_BACK)
907 rts
908 Lrem2:
909 movl #Lrem3,sp@-
910 jbsr _panic
911 Lrem3:
912 .asciz "remrunqueue"
913
914
915 | Message for Lbadsw panic
916 Lsw0:
917 .asciz "cpu_switch"
918 .even
919
920 .globl _curpcb
921 .globl _masterpaddr | XXX compatibility (debuggers)
922 .data
923 _masterpaddr: | XXX compatibility (debuggers)
924 _curpcb:
925 .long 0
926 mdpflag:
927 .byte 0 | copy of proc md_flags low byte
928 .align 2
929 .comm nullpcb,SIZEOF_PCB
930 .text
931
932 /*
933 * At exit of a process, do a cpu_switch for the last time.
934 * Switch to a safe stack and PCB, and deallocate the process's resources.
935 * The ipl is high enough to prevent the memory from being reallocated.
936 */
937 ENTRY(switch_exit)
938 movl sp@(4),a0 | struct proc *p
939 movl #nullpcb,_curpcb | save state into garbage pcb
940 lea tmpstk,sp | goto a tmp stack
941 movl a0,sp@- | pass proc ptr down
942
943 /* Free old process's u-area. */
944 movl #USPACE,sp@- | size of u-area
945 movl a0@(P_ADDR),sp@- | address of process's u-area
946 movl _kernel_map,sp@- | map it was allocated in
947 jbsr _kmem_free | deallocate it
948 lea sp@(12),sp | pop args
949
950 jra _cpu_switch
951
952 /*
953 * When no processes are on the runq, cpu_switch() branches to idle
954 * to wait for something to come ready.
955 */
956 .data
957 .globl _Idle_count
958 _Idle_count:
959 .long 0
960 .text
961
962 .globl Idle
963 Lidle:
964 stop #PSL_LOWIPL
965 Idle:
966 movw #PSL_HIGHIPL,sr
967 addql #1, _Idle_count
968 tstl _whichqs
969 jeq Lidle
970 movw #PSL_LOWIPL,sr
971 jra Lsw1
972
973 Lbadsw:
974 movl #Lsw0,sp@-
975 jbsr _panic
976 /*NOTREACHED*/
977
978 /*
979 * cpu_switch()
980 * Hacked for sun3
981 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
982 * XXX - Should we use p->p_addr instead of curpcb? -gwr
983 */
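/*
 * Outline of the code below (illustrative pseudocode):
 *
 *	save SR in the old pcb; curproc = NULL;
 *	scan _whichqs for the lowest set bit (highest priority queue),
 *	    idling until one appears;
 *	unlink the first proc p from that queue; clear the bit if now empty;
 *	curproc = p; save registers, USP and FP state into the old pcb;
 *	curpcb = p->p_addr; flush caches and TLB; load p's CRP;
 *	restore registers, USP and FP state from the new pcb; return 1.
 */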
984 ENTRY(cpu_switch)
985 movl _curpcb,a1 | current pcb
986 movw sr,a1@(PCB_PS) | save sr before changing ipl
987 #ifdef notyet
988 movl _curproc,sp@- | remember last proc running
989 #endif
990 clrl _curproc
991
992 Lsw1:
993 /*
994 * Find the highest-priority queue that isn't empty,
995 * then take the first proc from that queue.
996 */
997 clrl d0
998 lea _whichqs,a0
999 movl a0@,d1
1000 Lswchk:
1001 btst d0,d1
1002 jne Lswfnd
1003 addqb #1,d0
1004 cmpb #32,d0
1005 jne Lswchk
1006 jra Idle
1007 Lswfnd:
1008 movw #PSL_HIGHIPL,sr | lock out interrupts
1009 movl a0@,d1 | and check again...
1010 bclr d0,d1
1011 jeq Lsw1 | proc moved, rescan
1012 movl d1,a0@ | update whichqs
1013 moveq #1,d1 | double check for higher priority
1014 lsll d0,d1 | process (which may have snuck in
1015 subql #1,d1 | while we were finding this one)
1016 andl a0@,d1
1017 jeq Lswok | no one got in, continue
1018 movl a0@,d1
1019 bset d0,d1 | otherwise put this one back
1020 movl d1,a0@
1021 jra Lsw1 | and rescan
1022 Lswok:
1023 movl d0,d1
1024 lslb #3,d1 | convert queue number to index
1025 addl #_qs,d1 | locate queue (q)
1026 movl d1,a1
1027 cmpl a1@(P_FORW),a1 | anyone on queue?
1028 jeq Lbadsw | no, panic
1029 movl a1@(P_FORW),a0 | p = q->p_forw
1030 movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
1031 movl a0@(P_FORW),a1 | q = p->p_forw
1032 movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
1033 cmpl a0@(P_FORW),d1 | anyone left on queue?
1034 jeq Lsw2 | no, skip
1035 movl _whichqs,d1
1036 bset d0,d1 | yes, reset bit
1037 movl d1,_whichqs
1038 Lsw2:
1039 movl a0,_curproc
1040 clrl _want_resched
1041 #ifdef notyet
1042 movl sp@+,a1 | XXX - Make this work!
1043 cmpl a0,a1 | switching to same proc?
1044 jeq Lswdone | yes, skip save and restore
1045 #endif
1046 /*
1047 * Save state of previous process in its pcb.
1048 */
1049 movl _curpcb,a1
1050 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
1051 movl usp,a2 | grab USP (a2 has been saved)
1052 movl a2,a1@(PCB_USP) | and save it
1053
1054 tstl _fpu_type | Do we have an fpu?
1055 jeq Lswnofpsave | No? Then don't try to save.
1056 lea a1@(PCB_FPCTX),a2 | pointer to FP save area
1057 fsave a2@ | save FP state
1058 tstb a2@ | null state frame?
1059 jeq Lswnofpsave | yes, all done
1060 fmovem fp0-fp7,a2@(FPF_REGS) | save FP general regs
1061 fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control regs
1062 Lswnofpsave:
1063
1064 #ifdef DIAGNOSTIC
1065 tstl a0@(P_WCHAN)
1066 jne Lbadsw
1067 cmpb #SRUN,a0@(P_STAT)
1068 jne Lbadsw
1069 #endif
1070 clrl a0@(P_BACK) | clear back link
1071 movl a0@(P_ADDR),a1 | get p_addr
1072 movl a1,_curpcb
1073 movb a0@(P_MDFLAG+3),mdpflag | low byte of p_md.md_flags
1074
1075 /* Our pmap does not need pmap_activate() */
1076 /* Just load the new CPU Root Pointer (MMU) */
1077
1078 movl #CACHE_CLR,d0
1079 movc d0,cacr | invalidate cache(s)
1080 pflusha | flush entire TLB
1081
1082 movl a1@(PCB_MMUCRP),a0 | get CRP phys. addr.
1083 pmove a0@,crp | load new user root pointer
1084
1085 | Reload registers of new process.
1086 moveml a1@(PCB_REGS),#0xFCFC | kernel registers
1087 movl a1@(PCB_USP),a0
1088 movl a0,usp | and USP
1089
1090 tstl _fpu_type | If we don't have an fpu,
1091 jeq Lres_skip | don't try to restore it.
1092 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
1093 tstb a0@ | null state frame?
1094 jeq Lresfprest | yes, easy
1095 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1096 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1097 Lresfprest:
1098 frestore a0@ | restore state
1099 Lres_skip:
1100 movw a1@(PCB_PS),d0 | restore PS
1101 #ifdef DIAGNOSTIC
1102 btst #13,d0 | supervisor mode?
1103 jeq Lbadsw | no? panic!
1104 #endif
1105 movw d0,sr | OK, restore PS
1106 moveq #1,d0 | return 1 (for alternate returns)
1107 rts
1108
1109 /*
1110 * savectx(pcb)
1111 * Update pcb, saving current processor state.
1112 */
1113 ENTRY(savectx)
1114 movl sp@(4),a1
1115 movw sr,a1@(PCB_PS)
1116 movl usp,a0 | grab USP
1117 movl a0,a1@(PCB_USP) | and save it
1118 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
1119
1120 tstl _fpu_type | Do we have FPU?
1121 jeq Lsavedone | No? Then don't save state.
1122 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
1123 fsave a0@ | save FP state
1124 tstb a0@ | null state frame?
1125 jeq Lsavedone | yes, all done
1126 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1127 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1128 Lsavedone:
1129 moveq #0,d0 | return 0
1130 rts
1131
1132 /* suline() `040 only */
1133
1134 #ifdef DEBUG
1135 .data
1136 .globl fulltflush, fullcflush
1137 fulltflush:
1138 .long 0
1139 fullcflush:
1140 .long 0
1141 .text
1142 #endif
1143
1144 /*
1145 * Invalidate entire TLB.
1146 */
1147 ENTRY(TBIA)
1148 __TBIA:
1149 pflusha
1150 movl #DC_CLEAR,d0
1151 movc d0,cacr | invalidate on-chip d-cache
1152 rts
1153
1154 /*
1155 * Invalidate any TLB entry for given VA (TB Invalidate Single)
1156 */
1157 ENTRY(TBIS)
1158 #ifdef DEBUG
1159 tstl fulltflush | being conservative?
1160 jne __TBIA | yes, flush entire TLB
1161 #endif
1162 movl sp@(4),a0
1163 pflush #0,#0,a0@ | flush address from both sides
1164 movl #DC_CLEAR,d0
1165 movc d0,cacr | invalidate on-chip data cache
1166 rts
1167
1168 /*
1169 * Invalidate supervisor side of TLB
1170 */
1171 ENTRY(TBIAS)
1172 #ifdef DEBUG
1173 tstl fulltflush | being conservative?
1174 jne __TBIA | yes, flush everything
1175 #endif
1176 pflush #4,#4 | flush supervisor TLB entries
1177 movl #DC_CLEAR,d0
1178 movc d0,cacr | invalidate on-chip d-cache
1179 rts
1180
1181 /*
1182 * Invalidate user side of TLB
1183 */
1184 ENTRY(TBIAU)
1185 #ifdef DEBUG
1186 tstl fulltflush | being conservative?
1187 jne __TBIA | yes, flush everything
1188 #endif
1189 pflush #0,#4 | flush user TLB entries
1190 movl #DC_CLEAR,d0
1191 movc d0,cacr | invalidate on-chip d-cache
1192 rts
1193
1194 /*
1195 * Invalidate instruction cache
1196 */
1197 ENTRY(ICIA)
1198 movl #IC_CLEAR,d0
1199 movc d0,cacr | invalidate i-cache
1200 rts
1201
1202 /*
1203 * Invalidate data cache.
1204 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
1205 * problems with DC_WA. The only cases we have to worry about are context
1206 * switch and TLB changes, both of which are handled "in-line" in resume
1207 * and TBI*.
1208 */
1209 ENTRY(DCIA)
1210 __DCIA:
1211 rts
1212
1213 ENTRY(DCIS)
1214 __DCIS:
1215 rts
1216
1217 /*
1218 * Invalidate data cache.
1219 */
1220 ENTRY(DCIU)
1221 rts
1222
1223 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
1224
1225 ENTRY(PCIA)
1226 movl #DC_CLEAR,d0
1227 movc d0,cacr | invalidate on-chip d-cache
1228 rts
1229
1230 ENTRY(ecacheon)
1231 rts
1232
1233 ENTRY(ecacheoff)
1234 rts
1235
1236 /*
1237 * Get callers current SP value.
1238 * Note that simply taking the address of a local variable in a C function
1239 * doesn't work because callee saved registers may be outside the stack frame
1240 * defined by A6 (e.g. GCC generated code).
1241 *
1242 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
1243 */
1244 .globl _getsp
1245 _getsp:
1246 movl sp,d0 | get current SP
1247 addql #4,d0 | compensate for return address
1248 rts
1249
1250 ENTRY(getsfc)
1251 movc sfc,d0
1252 rts
1253
1254 ENTRY(getdfc)
1255 movc dfc,d0
1256 rts
1257
1258 ENTRY(getvbr)
1259 movc vbr, d0
1260 rts
1261
1262 ENTRY(setvbr)
1263 movl sp@(4), d0
1264 movc d0, vbr
1265 rts
1266
1267 /*
1268 * Load a new CPU Root Pointer (CRP) into the MMU.
1269 * void loadcrp(struct mmu_rootptr *);
1270 */
1271 ENTRY(loadcrp)
1272 movl sp@(4),a0 | arg1: &CRP
1273 movl #CACHE_CLR,d0
1274 movc d0,cacr | invalidate cache(s)
1275 pflusha | flush entire TLB
1276 pmove a0@,crp | load new user root pointer
1277 rts
1278
1279 /*
1280 * Set processor priority level calls. Most are implemented with
1281 * inline asm expansions. However, we need one instantiation here
1282 * in case some non-optimized code makes external references.
1283 * Most places will use the inlined function param.h supplies.
1284 */
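/*
 * Typical use of the spl*() wrappers built on this (illustrative):
 *
 *	s = splhigh();		-- returns the previous SR
 *	... critical section ...
 *	splx(s);		-- restore the saved priority
 */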
1285
1286 ENTRY(_spl)
1287 movl sp@(4),d1
1288 clrl d0
1289 movw sr,d0
1290 movw d1,sr
1291 rts
1292
1293 ENTRY(getsr)
1294 moveq #0, d0
1295 movw sr, d0
1296 rts
1297
1298 ENTRY(_insque)
1299 movw sr,d0
1300 movw #PSL_HIGHIPL,sr | atomic
1301 movl sp@(8),a0 | where to insert (after)
1302 movl sp@(4),a1 | element to insert (e)
1303 movl a0@,a1@ | e->next = after->next
1304 movl a0,a1@(4) | e->prev = after
1305 movl a1,a0@ | after->next = e
1306 movl a1@,a0 | a0 = e->next
1307 movl a1,a0@(4) | e->next->prev = e
1308 movw d0,sr
1309 rts
1310
1311 ENTRY(_remque)
1312 movw sr,d0
1313 movw #PSL_HIGHIPL,sr | atomic
1314 movl sp@(4),a0 | element to remove (e)
1315 movl a0@,a1
1316 movl a0@(4),a0
1317 movl a0,a1@(4) | e->next->prev = e->prev
1318 movl a1,a0@ | e->prev->next = e->next
1319 movw d0,sr
1320 rts
1321
1322 /*
1323 * Save and restore 68881 state.
1324 */
1325 ENTRY(m68881_save)
1326 movl sp@(4),a0 | save area pointer
1327 fsave a0@ | save state
1328 tstb a0@ | null state frame?
1329 jeq Lm68881sdone | yes, all done
1330 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1331 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1332 Lm68881sdone:
1333 rts
1334
1335 ENTRY(m68881_restore)
1336 movl sp@(4),a0 | save area pointer
1337 tstb a0@ | null state frame?
1338 jeq Lm68881rdone | yes, easy
1339 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1340 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1341 Lm68881rdone:
1342 frestore a0@ | restore state
1343 rts
1344
1345 /*
1346 * _delay(unsigned N)
1347 * Delay for at least (N/256) microseconds.
1348 * This routine depends on the variable: delay_divisor
1349 * which should be set based on the CPU clock rate.
1350 * XXX: Currently this is set in sun3_startup.c based on the
1351 * XXX: CPU model but this should be determined at run time...
1352 */
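/*
 * Worked example (illustrative): _delay(10 << 8) requests at least 10us;
 * the loop below runs about (10 << 8) / delay_divisor times, so
 * delay_divisor must be calibrated so that one subl/jgt pass costs
 * delay_divisor/256 microseconds on the CPU at hand.
 */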
1353 .globl __delay
1354 __delay:
1355 | d0 = arg = (usecs << 8)
1356 movl sp@(4),d0
1357 | d1 = delay_divisor;
1358 movl _delay_divisor,d1
1359 L_delay:
1360 subl d1,d0
1361 jgt L_delay
1362 rts
1363
1364
1365 | Define some addresses, mostly so DDB can print useful info.
1366 .globl _kernbase
1367 .set _kernbase,KERNBASE
1368 .globl _dvma_base
1369 .set _dvma_base,DVMA_SPACE_START
1370 .globl _prom_start
1371 .set _prom_start,MONSTART
1372 .globl _prom_base
1373 .set _prom_base,PROM_BASE
1374
1375 |The end!
1376