locore.s revision 1.5 1 /* $NetBSD: locore.s,v 1.5 1997/02/02 08:38:51 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 * @(#)locore.s 8.6 (Berkeley) 5/27/94
42 */
43
44 #include "assym.h"
45 #include <machine/trap.h>
46
47 | Note: this file uses MIT/Motorola assembler syntax; '|' starts a comment.
48
| Save area for the PROM monitor's CPU Root Pointer (CRP): two
| longwords (limit/attributes word, then the physical address of the
| level A table).  The startup code below also borrows this location
| briefly while setting up transparent translation register tt0.
49 .data
50 .globl _mon_crp
51 _mon_crp:
52 .long 0,0
53
54 | This is for kvm_mkdb, and should be the address of the beginning
55 | of the kernel text segment (not necessarily the same as kernbase).
56 .text
57 .globl _kernel_text
58 _kernel_text:
| (No code here; the label only marks the start of kernel text.)
59
| ---------------------------------------------------------------------
| Kernel entry point.  Runs position-independent at the boot loader's
| load address until the long jump to L_high_code below, then finishes
| bootstrap work and calls main().  Traps if main() ever returns.
| ---------------------------------------------------------------------
60 | This is the entry point, as well as the end of the temporary stack
61 | used during process switch (one 8K page ending at start)
62 .globl tmpstk
63 tmpstk:
64 .globl start
65 start:
66 | The first step, after disabling interrupts, is to map enough of the kernel
67 | into high virtual address space so that we can use position dependent code.
68 | This is a tricky task on the sun3x because the MMU is already enabled and
69 | the ROM monitor provides no indication of where the root MMU table is mapped.
70 | Therefore we must use one of the 68030's 'transparent translation' registers
71 | to define a range in the address space where the MMU translation is
72 | turned off. Once this is complete we can modify the MMU table directly
73 | without the need for it to be mapped into virtual memory.
74 | All code must be position independent until otherwise noted, as the
75 | boot loader has loaded us into low memory but all the symbols in this
76 | code have been linked high.
77 movw #PSL_HIGHIPL, sr | no interrupts
78 movl #KERNBASE, a5 | for vtop conversion
79 lea _mon_crp, a0 | where to store the CRP
80 subl a5, a0
| a0 now holds the physical address of _mon_crp (link VA minus KERNBASE)
81 | Note: borrowing mon_crp for tt0 setup...
82 movl #0x3F8107, a0@ | map the low 1GB v=p with the
83 pmove a0@, tt0 | transparent translation reg0
84
85 | In order to map the kernel into high memory we will copy the root table
86 | entry which maps the 16 megabytes of memory starting at 0x0 into the
87 | entry which maps the 16 megabytes starting at KERNBASE.
88 pmove crp, a0@ | Get monitor CPU root pointer
89 movl a0@(4), a1 | 2nd word is PA of level A table
90
91 movl a1, a0 | compute the descriptor address
92 addl #0x3e0, a1 | for VA starting at KERNBASE
93 movl a0@, a1@ | copy descriptor type
94 movl a0@(4), a1@(4) | copy physical address
95
96 | Kernel is now double mapped at zero and KERNBASE.
97 | Force a long jump to the relocated code (high VA).
98 movl #IC_CLEAR, d0 | Flush the I-cache
99 movc d0, cacr
100 jmp L_high_code:l | long jump
101
102 L_high_code:
103 | We are now running in the correctly relocated kernel, so
104 | we are no longer restricted to position-independent code.
105 | It is handy to leave transparent translation enabled
106 | for the low 1GB while __bootstrap() is doing its thing.
107
108 | Do bootstrap stuff needed before main() gets called.
109 | Our boot loader leaves a copy of the kernel's exec header
110 | just before the start of the kernel text segment, so the
111 | kernel can sanity-check the DDB symbols at [end...esym].
112 | Pass the struct exec at tmpstk-32 to __bootstrap().
113 lea tmpstk-32, sp
114 jsr __bootstrap | See _startup.c
115
116 | Now turn off the transparent translation of the low 1GB.
117 | (this also flushes the ATC)
118 clrl sp@-
119 pmove sp@,tt0
120 addql #4,sp
121
122 | Now that __bootstrap() is done using the PROM functions,
123 | we can safely set the sfc/dfc to something != FC_CONTROL
124 moveq #FC_USERD, d0 | make movs access "user data"
125 movc d0, sfc | space for copyin/copyout
126 movc d0, dfc
127
128 | Setup process zero user/kernel stacks.
129 movl _proc0paddr,a1 | get proc0 pcb addr
130 lea a1@(USPACE-4),sp | set SSP to last word
131 movl #USRSTACK-4,a2
132 movl a2,usp | init user SP
133
134 | Note curpcb was already set in __bootstrap().
135 | Will do fpu initialization during autoconfig (see fpu.c)
136 | The interrupt vector table and stack are now ready.
137 | Interrupts will be enabled later, AFTER autoconfiguration
138 | is finished, to avoid spurious interrupts.
139
140 /*
141 * Final preparation for calling main.
142 *
143 * Create a fake exception frame that returns to user mode,
144 * and save its address in p->p_md.md_regs for cpu_fork().
145 * The new frames for process 1 and 2 will be adjusted by
146 * cpu_set_kpc() to arrange for a call to a kernel function
147 * before the new process does its rte out to user mode.
148 */
149 clrw sp@- | vector offset/frame type
150 clrl sp@- | PC - filled in by "execve"
151 movw #PSL_USER,sp@- | in user mode
152 clrl sp@- | stack adjust count and padding
| 64 bytes = 16 registers * 4 bytes each
153 lea sp@(-64),sp | construct space for D0-D7/A0-A7
154 lea _proc0,a0 | proc0 in a0
155 movl sp,a0@(P_MDREGS) | save frame for proc0
156 movl usp,a1
157 movl a1,sp@(FR_SP) | save user stack pointer in frame
158 jbsr _main | main()
159 trap #15 | should not get here
160
161 | This is used by cpu_fork() to return to user mode.
162 | It is called with SP pointing to a struct trapframe.
| Loads the user SP from the frame, restores all registers except the
| supervisor SP, then executes the hardware frame left on the stack.
163 .globl _proc_do_uret
164 _proc_do_uret:
165 movl sp@(FR_SP),a0 | grab and load
166 movl a0,usp | user SP
167 moveml sp@+,#0x7FFF | load most registers (all but SSP)
168 addql #8,sp | pop SSP and stack adjust count
169 rte | return to user mode via the HW frame
170
171 /*
172 * proc_trampoline:
173 * This is used by cpu_set_kpc() to "push" a function call onto the
174 * kernel stack of some process, very much like a signal delivery.
175 * When we get here, the stack has:
176 *
177 * SP+8: switchframe from before cpu_set_kpc
178 * SP+4: void *proc;
179 * SP: u_long func;
180 *
181 * On entry, the switchframe pushed by cpu_set_kpc has already been
182 * popped off the stack, so all this needs to do is pop the function
183 * pointer into a register, call it, then pop the arg, and finally
184 * return using the switchframe that remains on the stack.
185 */
186 .globl _proc_trampoline
187 _proc_trampoline:
188 movl sp@+,a0 | function pointer
189 jbsr a0@ | (*func)(procp) - procp still on stack
190 addql #4,sp | toss the arg
191 rts | as cpu_switch would do
192
193 | That is all the assembly startup code we need on the sun3x!
194 | The rest of this is like the hp300/locore.s where possible.
195
196 /*
197 * Trap/interrupt vector routines
198 */
199
| Bus-error / address-error handling.  Saves a full exception frame,
| decodes the 68030 special status word (SSW) to recover the fault
| address, then runs a PTEST table search to classify the fault as an
| MMU fault, write-protect violation, or true bus error before calling
| trap() via the common Ltrapnstkadj tail.
200 .globl _buserr, _addrerr, _illinst, _zerodiv, _chkinst
201 .globl _trapvinst, _privinst, _trace, _badtrap, _fmterr
202 .globl _trap0, _trap1, _trap2, _trap12, _trap15
203 .globl _coperr, _fpfline, _fpunsupp
204
205 .globl _trap, _nofault, _longjmp
206 _buserr:
207 tstl _nofault | device probe?
208 jeq _addrerr | no, handle as usual
209 movl _nofault,sp@- | yes,
210 jbsr _longjmp | longjmp(nofault)
211 _addrerr:
212 clrl sp@- | stack adjust count
213 moveml #0xFFFF,sp@- | save user registers
214 movl usp,a0 | save the user SP
215 movl a0,sp@(FR_SP) | in the savearea
216 lea sp@(FR_HW),a1 | grab base of HW berr frame
217 moveq #0,d0
218 movw a1@(10),d0 | grab SSW for fault processing
219 btst #12,d0 | RB set?
220 jeq LbeX0 | no, test RC
221 bset #14,d0 | yes, must set FB
222 movw d0,a1@(10) | for hardware too
223 LbeX0:
224 btst #13,d0 | RC set?
225 jeq LbeX1 | no, skip
226 bset #15,d0 | yes, must set FC
227 movw d0,a1@(10) | for hardware too
228 LbeX1:
229 btst #8,d0 | data fault?
230 jeq Lbe0 | no, check for hard cases
231 movl a1@(16),d1 | fault address is as given in frame
232 jra Lbe10 | thats it
233 Lbe0:
234 btst #4,a1@(6) | long (type B) stack frame?
235 jne Lbe4 | yes, go handle
236 movl a1@(2),d1 | no, can use save PC
237 btst #14,d0 | FB set?
238 jeq Lbe3 | no, try FC
239 addql #4,d1 | yes, adjust address
240 jra Lbe10 | done
241 Lbe3:
242 btst #15,d0 | FC set?
243 jeq Lbe10 | no, done
244 addql #2,d1 | yes, adjust address
245 jra Lbe10 | done
246 Lbe4:
247 movl a1@(36),d1 | long format, use stage B address
248 btst #15,d0 | FC set?
249 jeq Lbe10 | no, all done
250 subql #2,d1 | yes, adjust address
251 Lbe10:
252 movl d1,sp@- | push fault VA
253 movl d0,sp@- | and padded SSW
254 movw a1@(6),d0 | get frame format/vector offset
255 andw #0x0FFF,d0 | clear out frame format
256 cmpw #12,d0 | address error vector?
257 jeq Lisaerr | yes, go to it
258
259 /* MMU-specific code to determine reason for bus error. */
260 movl d1,a0 | fault address
261 movl sp@,d0 | function code from ssw
262 btst #8,d0 | data fault?
263 jne Lbe10a
| NOTE(review): "movql" relies on the assembler accepting it as an
| alias for moveq - confirm against the build toolchain.
264 movql #1,d0 | user program access FC
265 | (we dont separate data/program)
266 btst #5,a1@ | supervisor mode?
267 jeq Lbe10a | if no, done
268 movql #5,d0 | else supervisor program access
269 Lbe10a:
270 ptestr d0,a0@,#7 | do a table search
271 pmove psr,sp@ | save result
272 movb sp@,d1
273 btst #2,d1 | invalid? (incl. limit viol and berr)
274 jeq Lmightnotbemerr | no -> wp check
275 btst #7,d1 | is it MMU table berr?
276 jeq Lismerr | no, must be fast
277 jra Lisberr1 | real bus err needs not be fast
278 Lmightnotbemerr:
279 btst #3,d1 | write protect bit set?
280 jeq Lisberr1 | no, must be bus error
281 movl sp@,d0 | ssw into low word of d0
282 andw #0xc0,d0 | write protect is set on page:
283 cmpw #0x40,d0 | was it read cycle?
284 jeq Lisberr1 | yes, was not WPE, must be bus err
285 /* End of MMU-specific bus error code. */
286
287 Lismerr:
288 movl #T_MMUFLT,sp@- | show that we are an MMU fault
289 jra Ltrapnstkadj | and deal with it
290 Lisaerr:
291 movl #T_ADDRERR,sp@- | mark address error
292 jra Ltrapnstkadj | and deal with it
293 Lisberr1:
294 clrw sp@ | re-clear pad word
295 Lisberr:
296 movl #T_BUSERR,sp@- | mark bus error
| Common tail: call trap(type, code, va), then restore, handling any
| stack adjustment requested by sendsig() (FR_ADJ != 0).
297 Ltrapnstkadj:
298 jbsr _trap | handle the error
299 lea sp@(12),sp | pop value args
300 movl sp@(FR_SP),a0 | restore user SP
301 movl a0,usp | from save area
302 movw sp@(FR_ADJ),d0 | need to adjust stack?
303 jne Lstkadj | yes, go to it
304 moveml sp@+,#0x7FFF | no, restore most user regs
305 addql #8,sp | toss SSP and stkadj
306 jra rei | all done
307 Lstkadj:
308 lea sp@(FR_HW),a1 | pointer to HW frame
309 addql #8,a1 | source pointer
310 movl a1,a0 | source
311 addw d0,a0 | + hole size = dest pointer
312 movl a1@-,a0@- | copy
313 movl a1@-,a0@- | 8 bytes
314 movl a0,sp@(FR_SP) | new SSP
315 moveml sp@+,#0x7FFF | restore user registers
316 movl sp@,sp | and our SP
317 jra rei | all done
318
319 /*
320 * FP exceptions.
321 */
| Unimplemented FP instruction / datatype traps.  Both save a standard
| frame and join the common 'fault' code with the trap type in d0.
322 _fpfline:
323 clrl sp@- | stack adjust count
324 moveml #0xFFFF,sp@- | save registers
325 moveq #T_FPEMULI,d0 | denote as FP emulation trap
326 jra fault | do it
327
328 _fpunsupp:
329 clrl sp@- | stack adjust count
330 moveml #0xFFFF,sp@- | save registers
331 moveq #T_FPEMULD,d0 | denote as FP emulation trap
332 jra fault | do it
333
334 /*
335 * Handles all other FP coprocessor exceptions.
336 * Note that since some FP exceptions generate mid-instruction frames
337 * and may cause signal delivery, we need to test for stack adjustment
338 * after the trap call.
339 */
340 .globl _fpfault
341 _fpfault:
342 clrl sp@- | stack adjust count
343 moveml #0xFFFF,sp@- | save user registers
344 movl usp,a0 | and save
345 movl a0,sp@(FR_SP) | the user stack pointer
346 clrl sp@- | no VA arg
347 movl _curpcb,a0 | current pcb
348 lea a0@(PCB_FPCTX),a0 | address of FP savearea
349 fsave a0@ | save state
350 tstb a0@ | null state frame?
351 jeq Lfptnull | yes, safe
352 clrw d0 | no, need to tweak BIU
353 movb a0@(1),d0 | get frame size
| frame size byte indexes the last word of the fsave frame, where the
| exc_pend bit lives (FPU BIU flags) - see 68881/68882 frame layout
354 bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
355 Lfptnull:
356 fmovem fpsr,sp@- | push fpsr as code argument
357 frestore a0@ | restore state
358 movl #T_FPERR,sp@- | push type arg
359 jra Ltrapnstkadj | call trap and deal with stack cleanup
360
361 /*
362 * Coprocessor and format errors can generate mid-instruction stack
363 * frames and cause signal delivery hence we need to check for potential
364 * stack adjustment.
365 */
| Both push (type, code=0, va=0) and use the Ltrapnstkadj tail above.
366 _coperr:
367 clrl sp@- | stack adjust count
368 moveml #0xFFFF,sp@-
369 movl usp,a0 | get and save
370 movl a0,sp@(FR_SP) | the user stack pointer
371 clrl sp@- | no VA arg
372 clrl sp@- | or code arg
373 movl #T_COPERR,sp@- | push trap type
374 jra Ltrapnstkadj | call trap and deal with stack adjustments
375
376 _fmterr:
377 clrl sp@- | stack adjust count
378 moveml #0xFFFF,sp@-
379 movl usp,a0 | get and save
380 movl a0,sp@(FR_SP) | the user stack pointer
381 clrl sp@- | no VA arg
382 clrl sp@- | or code arg
383 movl #T_FMTERR,sp@- | push trap type
384 jra Ltrapnstkadj | call trap and deal with stack adjustments
385
386 /*
387 * Other exceptions only cause four and six word stack frame and require
388 * no post-trap stack adjustment.
389 */
| Each stub pushes a zero stack-adjust word, saves all registers, and
| joins the common 'fault' code with the trap type in d0.
390 _illinst:
391 clrl sp@-
392 moveml #0xFFFF,sp@-
393 moveq #T_ILLINST,d0
394 jra fault
395
396 _zerodiv:
397 clrl sp@-
398 moveml #0xFFFF,sp@-
399 moveq #T_ZERODIV,d0
400 jra fault
401
402 _chkinst:
403 clrl sp@-
404 moveml #0xFFFF,sp@-
405 moveq #T_CHKINST,d0
406 jra fault
407
408 _trapvinst:
409 clrl sp@-
410 moveml #0xFFFF,sp@-
411 moveq #T_TRAPVINST,d0
412 jra fault
413
414 _privinst:
415 clrl sp@-
416 moveml #0xFFFF,sp@-
417 moveq #T_PRIVINST,d0
418 jra fault
419
| Common tail for simple faults: entered with all registers already
| saved and the trap type in d0.  Calls trap(type, 0, 0), restores
| registers, and exits through the AST check at rei.
420 .globl fault
421 fault:
422 movl usp,a0 | get and save
423 movl a0,sp@(FR_SP) | the user stack pointer
424 clrl sp@- | no VA arg
425 clrl sp@- | or code arg
426 movl d0,sp@- | push trap type
427 jbsr _trap | handle trap
428 lea sp@(12),sp | pop value args
429 movl sp@(FR_SP),a0 | restore
430 movl a0,usp | user SP
431 moveml sp@+,#0x7FFF | restore most user regs
432 addql #8,sp | pop SP and stack adjust
433 jra rei | all done
434
| Unexpected trap vectors land here; straytrap() just logs them and
| the full register set is restored (no usp handling needed).
435 .globl _straytrap
436 _badtrap:
437 clrl sp@- | stack adjust count
438 moveml #0xFFFF,sp@- | save std frame regs
439 jbsr _straytrap | report
440 moveml sp@+,#0xFFFF | restore regs
441 addql #4, sp | stack adjust count
442 jra rei | all done
443
444 /*
445 * Trap 0 is for system calls
446 */
| Syscall number arrives in d0; syscall() gets it as its argument and
| the saved frame (including user SP) is restored afterwards.
447 .globl _syscall
448 _trap0:
449 clrl sp@- | stack adjust count
450 moveml #0xFFFF,sp@- | save user registers
451 movl usp,a0 | save the user SP
452 movl a0,sp@(FR_SP) | in the savearea
453 movl d0,sp@- | push syscall number
454 jbsr _syscall | handle it
455 addql #4,sp | pop syscall arg
456 movl sp@(FR_SP),a0 | grab and restore
457 movl a0,usp | user SP
458 moveml sp@+,#0x7FFF | restore most registers
459 addql #8,sp | pop SP and stack adjust
460 jra rei | all done
461
462 /*
463 * Trap 1 is either:
464 * sigreturn (native NetBSD executable)
465 * breakpoint (HPUX executable)
466 */
467 _trap1:
468 #if 0 /* COMPAT_HPUX */
469 /* If process is HPUX, this is a user breakpoint. */
| Fixed: target label is _trap15 (defined below); bare "trap15" does
| not exist in this file and would fail to assemble if enabled.
470 jne _trap15 | breakpoint
471 #endif
472 /* fall into sigreturn */
473
474 /*
475 * The sigreturn() syscall comes here. It requires special handling
476 * because we must open a hole in the stack to fill in the (possibly much
477 * larger) original stack frame.
478 */
| Reserves 84 bytes, slides the current 8-byte frame up, then calls
| syscall(SYS_sigreturn).  If sendsig left a stack-adjust (FR_ADJ),
| the hardware frame is copied down word-by-word before the rte path.
479 sigreturn:
480 lea sp@(-84),sp | leave enough space for largest frame
481 movl sp@(84),sp@ | move up current 8 byte frame
482 movl sp@(88),sp@(4)
483 movl #84,sp@- | default: adjust by 84 bytes
484 moveml #0xFFFF,sp@- | save user registers
485 movl usp,a0 | save the user SP
486 movl a0,sp@(FR_SP) | in the savearea
487 movl #SYS_sigreturn,sp@- | push syscall number
488 jbsr _syscall | handle it
489 addql #4,sp | pop syscall#
490 movl sp@(FR_SP),a0 | grab and restore
491 movl a0,usp | user SP
492 lea sp@(FR_HW),a1 | pointer to HW frame
493 movw sp@(FR_ADJ),d0 | do we need to adjust the stack?
494 jeq Lsigr1 | no, just continue
495 moveq #92,d1 | total size
496 subw d0,d1 | - hole size = frame size
497 lea a1@(92),a0 | destination
498 addw d1,a1 | source
499 lsrw #1,d1 | convert to word count
500 subqw #1,d1 | minus 1 for dbf
501 Lsigrlp:
502 movw a1@-,a0@- | copy a word
503 dbf d1,Lsigrlp | continue
504 movl a0,a1 | new HW frame base
505 Lsigr1:
506 movl a1,sp@(FR_SP) | new SP value
507 moveml sp@+,#0x7FFF | restore user registers
508 movl sp@,sp | and our SP
509 jra rei | all done
510
511 /*
512 * Trap 2 is one of:
513 * NetBSD: not used (ignore)
514 * SunOS: Some obscure FPU operation
515 * HPUX: sigreturn
516 */
517 _trap2:
518 #if 0 /* COMPAT_HPUX */
519 /* XXX: If HPUX, this is a user breakpoint. */
520 jne sigreturn
521 #endif
522 /* fall into trace (NetBSD or SunOS) */
523
524 /*
525 * Trace (single-step) trap. Kernel-mode is special.
526 * User mode traps are simply passed on to trap().
527 */
| PSW test: supervisor-mode traces go to the kernel debugger entry
| (kbrkpt); user-mode ones take the common fault path.
528 _trace:
529 clrl sp@- | stack adjust count
530 moveml #0xFFFF,sp@-
531 moveq #T_TRACE,d0
532 movw sp@(FR_HW),d1 | get PSW
533 andw #PSL_S,d1 | from system mode?
534 jne kbrkpt | yes, kernel breakpoint
535 jra fault | no, user-mode fault
536
537 /*
538 * Trap 15 is used for:
539 * - GDB breakpoints (in user programs)
540 * - KGDB breakpoints (in the kernel)
541 * - trace traps for SUN binaries (not fully supported yet)
542 * User mode traps are simply passed to trap()
543 */
544 _trap15:
545 clrl sp@- | stack adjust count
546 moveml #0xFFFF,sp@-
547 moveq #T_TRAP15,d0
548 movw sp@(FR_HW),d1 | get PSW
549 andw #PSL_S,d1 | from system mode?
550 jne kbrkpt | yes, kernel breakpoint
551 jra fault | no, user-mode fault
552
553 kbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
554 | Save the system sp rather than the user sp.
555 movw #PSL_HIGHIPL,sr | lock out interrupts
556 lea sp@(FR_SIZE),a6 | Save stack pointer
557 movl a6,sp@(FR_SP) | from before trap
558
559 | If we are not on tmpstk switch to it.
560 | (so debugger can change the stack pointer)
561 movl a6,d1
562 cmpl #tmpstk,d1
563 jls Lbrkpt2 | already on tmpstk
564 | Copy frame to the temporary stack
565 movl sp,a0 | a0=src
| assumes FR_SIZE <= 96 so the copy fits - TODO confirm in assym.h
566 lea tmpstk-96,a1 | a1=dst
567 movl a1,sp | sp=new frame
568 moveq #FR_SIZE,d1
569 Lbrkpt1:
570 movl a0@+,a1@+
571 subql #4,d1
572 bgt Lbrkpt1
573
574 Lbrkpt2:
575 | Call the trap handler for the kernel debugger.
576 | Do not call trap() to do it, so that we can
577 | set breakpoints in trap() if we want. We know
578 | the trap type is either T_TRACE or T_BREAKPOINT.
579 | If we have both DDB and KGDB, let KGDB see it first,
580 | because KGDB will just return 0 if not connected.
581 | Save args in d2, a2
582 movl d0,d2 | trap type
583 movl sp,a2 | frame ptr
584 #ifdef KGDB
585 | Let KGDB handle it (if connected)
586 movl a2,sp@- | push frame ptr
587 movl d2,sp@- | push trap type
588 jbsr _kgdb_trap | handle the trap
589 addql #8,sp | pop args
590 cmpl #0,d0 | did kgdb handle it
591 jne Lbrkpt3 | yes, done
592 #endif
593 #ifdef DDB
594 | Let DDB handle it.
595 movl a2,sp@- | push frame ptr
596 movl d2,sp@- | push trap type
597 jbsr _kdb_trap | handle the trap
598 addql #8,sp | pop args
599 cmpl #0,d0 | did ddb handle it
600 jne Lbrkpt3 | yes, done
601 #endif
602 | Drop into the PROM temporarily...
603 movl a2,sp@- | push frame ptr
604 movl d2,sp@- | push trap type
605 jbsr __nodb_trap | handle the trap
606 addql #8,sp | pop args
607 Lbrkpt3:
608 | The stack pointer may have been modified, or
609 | data below it modified (by kgdb push call),
610 | so push the hardware frame at the current sp
611 | before restoring registers and returning.
612
613 movl sp@(FR_SP),a0 | modified sp
614 lea sp@(FR_SIZE),a1 | end of our frame
615 movl a1@-,a0@- | copy 2 longs with
616 movl a1@-,a0@- | ... predecrement
617 movl a0,sp@(FR_SP) | sp = h/w frame
618 moveml sp@+,#0x7FFF | restore all but sp
619 movl sp@,sp | ... and sp
620 rte | all done
621
622 /*
623 * Trap 12 is the entry point for the cachectl "syscall"
624 * cachectl(command, addr, length)
625 * command in d0, addr in a1, length in d1
626 */
| Arguments are marshalled from registers onto the stack C-style
| (pushed right to left) before calling cachectl().
627 .globl _cachectl
628 _trap12:
629 movl d1,sp@- | push length
630 movl a1,sp@- | push addr
631 movl d0,sp@- | push command
632 jbsr _cachectl | do it
633 lea sp@(12),sp | pop args
634 jra rei | all done
635
636 /*
637 * Interrupt handlers. Most are auto-vectored,
638 * and hard-wired the same way on all sun3 models.
639 * Format in the stack is:
640 * d0,d1,a0,a1, sr, pc, vo
641 */
642
| Save/restore the C scratch registers d0-d1/a0-a1 around a handler
| call (the moveml masks are mirror images for pre/post-increment).
643 #define INTERRUPT_SAVEREG \
644 moveml #0xC0C0,sp@-
645
646 #define INTERRUPT_RESTORE \
647 moveml sp@+,#0x0303
648
649 /*
650 * This is the common auto-vector interrupt handler,
651 * for which the CPU provides the vector=0x18+level.
652 * These are installed in the interrupt vector table.
653 */
654 .align 2
655 .globl __isr_autovec, _isr_autovec
656 __isr_autovec:
657 INTERRUPT_SAVEREG
658 jbsr _isr_autovec
659 INTERRUPT_RESTORE
660 jra rei
661
662 /* clock: see clock.c */
663 .align 2
664 .globl __isr_clock, _clock_intr
665 __isr_clock:
666 INTERRUPT_SAVEREG
667 jbsr _clock_intr
668 INTERRUPT_RESTORE
669 jra rei
670
671 | Handler for all vectored interrupts (i.e. VME interrupts)
672 .align 2
673 .globl __isr_vectored, _isr_vectored
674 __isr_vectored:
675 INTERRUPT_SAVEREG
676 jbsr _isr_vectored
677 INTERRUPT_RESTORE
678 jra rei
679
680 #undef INTERRUPT_SAVEREG
681 #undef INTERRUPT_RESTORE
682
683 /* interrupt counters (needed by vmstat) */
684 .globl _intrcnt,_eintrcnt,_intrnames,_eintrnames
685 _intrnames:
686 .asciz "spur" | 0
687 .asciz "lev1" | 1
688 .asciz "lev2" | 2
689 .asciz "lev3" | 3
690 .asciz "lev4" | 4
691 .asciz "clock" | 5
692 .asciz "lev6" | 6
693 .asciz "nmi" | 7
694 _eintrnames:
695
696 .data
697 .even
| NOTE(review): 10 counter slots but only 8 names above - vmstat
| pairs them by index; confirm the extra two slots are intentional.
698 _intrcnt:
699 .long 0,0,0,0,0,0,0,0,0,0
700 _eintrcnt:
701 .text
702
703 /*
704 * Emulation of VAX REI instruction.
705 *
706 * This code is (mostly) un-altered from the hp300 code,
707 * except that sun machines do not need a simulated SIR
708 * because they have a real software interrupt register.
709 *
710 * This code deals with checking for and servicing ASTs
711 * (profiling, scheduling) and software interrupts (network, softclock).
712 * We check for ASTs first, just like the VAX. To avoid excess overhead
713 * the T_ASTFLT handling code will also check for software interrupts so we
714 * do not have to do it here. After identifying that we need an AST we
715 * drop the IPL to allow device interrupts.
716 *
717 * This code is complicated by the fact that sendsig may have been called
718 * necessitating a stack cleanup.
719 */
720
721 .globl _astpending
722 .globl rei
723 rei:
724 #ifdef DIAGNOSTIC
725 tstl _panicstr | have we panicked?
726 jne Ldorte | yes, do not make matters worse
727 #endif
728 tstl _astpending | AST pending?
729 jeq Ldorte | no, done
730 Lrei1:
731 btst #5,sp@ | yes, are we returning to user mode?
732 jne Ldorte | no, done
733 movw #PSL_LOWIPL,sr | lower SPL
734 clrl sp@- | stack adjust
735 moveml #0xFFFF,sp@- | save all registers
736 movl usp,a1 | including
737 movl a1,sp@(FR_SP) | the users SP
738 clrl sp@- | VA == none
739 clrl sp@- | code == none
740 movl #T_ASTFLT,sp@- | type == async system trap
741 jbsr _trap | go handle it
742 lea sp@(12),sp | pop value args
743 movl sp@(FR_SP),a0 | restore user SP
744 movl a0,usp | from save area
745 movw sp@(FR_ADJ),d0 | need to adjust stack?
746 jne Laststkadj | yes, go to it
747 moveml sp@+,#0x7FFF | no, restore most user regs
748 addql #8,sp | toss SP and stack adjust
749 rte | and do real RTE
750 Laststkadj:
751 lea sp@(FR_HW),a1 | pointer to HW frame
752 addql #8,a1 | source pointer
753 movl a1,a0 | source
754 addw d0,a0 | + hole size = dest pointer
755 movl a1@-,a0@- | copy
756 movl a1@-,a0@- | 8 bytes
757 movl a0,sp@(FR_SP) | new SSP
758 moveml sp@+,#0x7FFF | restore user registers
759 movl sp@,sp | and our SP
760 Ldorte:
761 rte | real return
762
763 /*
764 * Initialization is at the beginning of this file, because the
765 * kernel entry point needs to be at zero for compatibility with
766 * the Sun boot loader. This works on Sun machines because the
767 * interrupt vector table for reset is NOT at address zero.
768 * (The MMU has a "boot" bit that forces access to the PROM)
769 */
770
771 /*
772 * Signal "trampoline" code (18 bytes). Invoked from RTE setup by sendsig().
773 *
774 * Stack looks like:
775 *
776 * sp+0 -> signal number
777 * sp+4 signal specific code
778 * sp+8 pointer to signal context frame (scp)
779 * sp+12 address of handler
780 * sp+16 saved hardware state
781 * .
782 * .
783 * scp+0-> beginning of signal context frame
784 */
| This code is copied out to the user stack; it runs in user mode.
| After the handler returns, trap #1 (sigreturn) restores context;
| if that fails, the exit(errno) path below runs.
785 .globl _sigcode, _esigcode
786 .data
787 .align 2
788 _sigcode: /* Found at address: 0x0DFFffdc */
789 movl sp@(12),a0 | signal handler addr (4 bytes)
790 jsr a0@ | call signal handler (2 bytes)
791 addql #4,sp | pop signo (2 bytes)
792 trap #1 | special syscall entry (2 bytes)
793 movl d0,sp@(4) | save errno (4 bytes)
794 moveq #1,d0 | syscall == exit (2 bytes)
795 trap #0 | exit(errno) (2 bytes)
796 .align 2
797 _esigcode:
798 .text
799
800 /* XXX - hp300 still has icode here... */
801
802 /*
803 * Primitives
804 */
805 #include <machine/asm.h>
806
807 /*
808 * non-local gotos
809 */
| int setjmp(label_t *a0): saves the callee-saved registers d2-d7/a2-a7
| (moveml mask 0xFCFC) plus the return PC at offset 48; returns 0.
810 ENTRY(setjmp)
811 movl sp@(4),a0 | savearea pointer
812 moveml #0xFCFC,a0@ | save d2-d7/a2-a7
813 movl sp@,a0@(48) | and return address
814 moveq #0,d0 | return 0
815 rts
816
| void longjmp(label_t *): reloads d2-d7/a2-a7, replaces the return
| address on the stack with the one saved by setjmp, and "returns" 1
| from the original setjmp call.
817 ENTRY(longjmp)
818 movl sp@(4),a0 | savearea pointer
819 moveml a0@+,#0xFCFC | restore d2-d7/a2-a7
820 movl a0@,sp@ | overwrite our return address
821 moveq #1,d0 | setjmp returns 1 this time
822 rts
823
824 /*
825 * The following primitives manipulate the run queues.
826 * _whichqs tells which of the 32 queues _qs have processes in them.
827 * Setrunqueue puts processes into queues, Remrunqueue removes them
828 * from queues. The running process is on no queue, other processes
829 * are on a queue related to p->p_priority, divided by 4 actually to
830 * shrink the 0-127 range of priorities into the 32 available queues.
831 */
832
833 .globl _whichqs,_qs,_cnt,_panic
834 .globl _curproc
835 .comm _want_resched,4
836
837 /*
838 * setrunqueue(p)
839 *
840 * Call should be made at splclock(), and p->p_stat should be SRUN
841 */
842 ENTRY(setrunqueue)
843 movl sp@(4),a0 | a0 = p (struct proc *)
844 #ifdef DIAGNOSTIC
| Sanity: p must not already be queued, must not be sleeping,
| and must be runnable - otherwise panic.
845 tstl a0@(P_BACK)
846 jne Lset1
847 tstl a0@(P_WCHAN)
848 jne Lset1
849 cmpb #SRUN,a0@(P_STAT)
850 jne Lset1
851 #endif
| Queue index = p->p_priority / 4; set its bit in whichqs.
852 clrl d0
853 movb a0@(P_PRIORITY),d0
854 lsrb #2,d0
855 movl _whichqs,d1
856 bset d0,d1
857 movl d1,_whichqs
| d0 = &qs[index] (each queue head is 8 bytes); link p at the tail.
858 lslb #3,d0
859 addl #_qs,d0
860 movl d0,a0@(P_FORW)
861 movl d0,a1
862 movl a1@(P_BACK),a0@(P_BACK)
863 movl a0,a1@(P_BACK)
864 movl a0@(P_BACK),a1
865 movl a0,a1@(P_FORW)
866 rts
867 #ifdef DIAGNOSTIC
868 Lset1:
869 movl #Lset2,sp@-
870 jbsr _panic
871 Lset2:
872 .asciz "setrunqueue"
873 .even
874 #endif
875
876 /*
877 * remrunqueue(p)
878 *
879 * Call should be made at splclock().
880 */
881 ENTRY(remrunqueue)
882 movl sp@(4),a0 | proc *p
| Queue index = p->p_priority / 4; clear its whichqs bit (panic if
| the queue was already marked empty).
883 clrl d0
884 movb a0@(P_PRIORITY),d0
885 lsrb #2,d0
886 movl _whichqs,d1
887 bclr d0,d1 | if ((d1 & (1 << d0)) == 0)
888 jeq Lrem2 | panic (empty queue)
889 movl d1,_whichqs
| Unlink p from the doubly-linked queue.
890 movl a0@(P_FORW),a1
891 movl a0@(P_BACK),a1@(P_BACK)
892 movl a0@(P_BACK),a1
893 movl a0@(P_FORW),a1@(P_FORW)
| If the queue is still non-empty, put its whichqs bit back.
894 movl #_qs,a1
895 movl d0,d1
896 lslb #3,d1
897 addl d1,a1
898 cmpl a1@(P_FORW),a1
899 jeq Lrem1
900 movl _whichqs,d1
901 bset d0,d1
902 movl d1,_whichqs
903 Lrem1:
904 clrl a0@(P_BACK)
905 rts
906 Lrem2:
907 movl #Lrem3,sp@-
908 jbsr _panic
909 Lrem3:
910 .asciz "remrunqueue"
911
912
913 | Message for Lbadsw panic
914 Lsw0:
915 .asciz "cpu_switch"
916 .even
917
| _curpcb: pointer to the pcb of the process whose context is loaded.
| _masterpaddr is the same location under an older name kept only so
| existing debuggers keep working.
918 .globl _curpcb
919 .globl _masterpaddr | XXX compatibility (debuggers)
920 .data
921 _masterpaddr: | XXX compatibility (debuggers)
922 _curpcb:
923 .long 0
924 mdpflag:
925 .byte 0 | copy of proc md_flags low byte
926 .align 2
927 .comm nullpcb,SIZEOF_PCB
928 .text
929
930 /*
931 * At exit of a process, do a cpu_switch for the last time.
932 * Switch to a safe stack and PCB, and deallocate the process's resources.
933 * The ipl is high enough to prevent the memory from being reallocated.
934 */
935 ENTRY(switch_exit)
936 movl sp@(4),a0 | struct proc *p
937 movl #nullpcb,_curpcb | save state into garbage pcb
938 lea tmpstk,sp | goto a tmp stack
939 movl a0,sp@- | pass proc ptr down
940
941 /* Free old process's u-area. */
942 movl #USPACE,sp@- | size of u-area
943 movl a0@(P_ADDR),sp@- | address of process's u-area
944 movl _kernel_map,sp@- | map it was allocated in
945 jbsr _kmem_free | deallocate it
946 lea sp@(12),sp | pop args
947
| Tail-jump: cpu_switch never returns here.
948 jra _cpu_switch
949
950 /*
951 * When no processes are on the runq, cpu_switch() branches to idle
952 * to wait for something to come ready.
953 */
954 .data
955 .globl _Idle_count
956 _Idle_count:
957 .long 0
958 .text
959
| Idle loop: with interrupts masked, re-check whichqs; if still empty,
| stop (low IPL) until an interrupt wakes us, then check again.
960 .globl Idle
961 Lidle:
962 stop #PSL_LOWIPL
963 Idle:
964 movw #PSL_HIGHIPL,sr
965 addql #1, _Idle_count
966 tstl _whichqs
967 jeq Lidle
968 movw #PSL_LOWIPL,sr
969 jra Lsw1 | a queue is ready; go pick a proc
970
| Panic target for run-queue corruption found during cpu_switch.
971 Lbadsw:
972 movl #Lsw0,sp@-
973 jbsr _panic
974 /*NOTREACHED*/
975
976 /*
977 * cpu_switch()
978 * Hacked for sun3
979 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
980 * XXX - Should we use p->p_addr instead of curpcb? -gwr
981 */
982 ENTRY(cpu_switch)
983 movl _curpcb,a1 | current pcb
984 movw sr,a1@(PCB_PS) | save sr before changing ipl
985 #ifdef notyet
986 movl _curproc,sp@- | remember last proc running
987 #endif
988 clrl _curproc
989
990 Lsw1:
991 /*
992 * Find the highest-priority queue that isn't empty,
993 * then take the first proc from that queue.
994 */
995 clrl d0
996 lea _whichqs,a0
997 movl a0@,d1
998 Lswchk:
999 btst d0,d1
1000 jne Lswfnd
1001 addqb #1,d0
1002 cmpb #32,d0
1003 jne Lswchk
1004 jra Idle
1005 Lswfnd:
1006 movw #PSL_HIGHIPL,sr | lock out interrupts
1007 movl a0@,d1 | and check again...
1008 bclr d0,d1
1009 jeq Lsw1 | proc moved, rescan
1010 movl d1,a0@ | update whichqs
1011 moveq #1,d1 | double check for higher priority
1012 lsll d0,d1 | process (which may have snuck in
1013 subql #1,d1 | while we were finding this one)
1014 andl a0@,d1
1015 jeq Lswok | no one got in, continue
1016 movl a0@,d1
1017 bset d0,d1 | otherwise put this one back
1018 movl d1,a0@
1019 jra Lsw1 | and rescan
1020 Lswok:
1021 movl d0,d1
1022 lslb #3,d1 | convert queue number to index
1023 addl #_qs,d1 | locate queue (q)
1024 movl d1,a1
1025 cmpl a1@(P_FORW),a1 | anyone on queue?
1026 jeq Lbadsw | no, panic
1027 movl a1@(P_FORW),a0 | p = q->p_forw
1028 movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
1029 movl a0@(P_FORW),a1 | q = p->p_forw
1030 movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
1031 cmpl a0@(P_FORW),d1 | anyone left on queue?
1032 jeq Lsw2 | no, skip
1033 movl _whichqs,d1
1034 bset d0,d1 | yes, reset bit
1035 movl d1,_whichqs
1036 Lsw2:
1037 movl a0,_curproc
1038 clrl _want_resched
1039 #ifdef notyet
1040 movl sp@+,a1 | XXX - Make this work!
1041 cmpl a0,a1 | switching to same proc?
1042 jeq Lswdone | yes, skip save and restore
1043 #endif
1044 /*
1045 * Save state of previous process in its pcb.
1046 */
1047 movl _curpcb,a1
1048 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
1049 movl usp,a2 | grab USP (a2 has been saved)
1050 movl a2,a1@(PCB_USP) | and save it
1051
1052 tstl _fpu_type | Do we have an fpu?
1053 jeq Lswnofpsave | No? Then don't try save.
1054 lea a1@(PCB_FPCTX),a2 | pointer to FP save area
1055 fsave a2@ | save FP state
1056 tstb a2@ | null state frame?
1057 jeq Lswnofpsave | yes, all done
1058 fmovem fp0-fp7,a2@(FPF_REGS) | save FP general regs
1059 fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control regs
1060 Lswnofpsave:
1061
1062 #ifdef DIAGNOSTIC
1063 tstl a0@(P_WCHAN)
1064 jne Lbadsw
1065 cmpb #SRUN,a0@(P_STAT)
1066 jne Lbadsw
1067 #endif
1068 clrl a0@(P_BACK) | clear back link
1069 movl a0@(P_ADDR),a1 | get p_addr
1070 movl a1,_curpcb
1071 movb a0@(P_MDFLAG+3),mdpflag | low byte of p_md.md_flags
1072
1073 /* Our pmap does not need pmap_activate() */
1074 /* Just load the new CPU Root Pointer (MMU) */
1075 /* XXX - Skip if oldproc has same pm_a_tbl? */
1076 movl a0@(P_VMSPACE),a0 | vm = p->p_vmspace
1077 lea a0@(VM_PMAP_MMUCRP),a0 | a0 = &vm->vm_pmap.pm_mmucrp
1078
1079 movl #CACHE_CLR,d0
1080 movc d0,cacr | invalidate cache(s)
1081 pflusha | flush entire TLB
1082 pmove a0@,crp | load new user root pointer
1083
1084 | Reload registers of new process.
1085 moveml a1@(PCB_REGS),#0xFCFC | kernel registers
1086 movl a1@(PCB_USP),a0
1087 movl a0,usp | and USP
1088
1089 tstl _fpu_type | If we don't have an fpu,
1090 jeq Lres_skip | don't try to restore it.
1091 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
1092 tstb a0@ | null state frame?
1093 jeq Lresfprest | yes, easy
1094 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1095 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1096 Lresfprest:
1097 frestore a0@ | restore state
1098 Lres_skip:
1099 movw a1@(PCB_PS),d0 | no, restore PS
1100 #ifdef DIAGNOSTIC
1101 btst #13,d0 | supervisor mode?
1102 jeq Lbadsw | no? panic!
1103 #endif
1104 movw d0,sr | OK, restore PS
1105 moveq #1,d0 | return 1 (for alternate returns)
1106 rts
1107
1108 /*
1109  * savectx(pcb)
1110  * Update pcb, saving current processor state.
 * Saves SR, USP, the non-scratch registers, and (when an FPU exists
 * and has live state) the FP frame.  Returns 0 in d0.
1111  */
1112 ENTRY(savectx)
1113 movl sp@(4),a1 | a1 = arg1: pcb pointer
1114 movw sr,a1@(PCB_PS) | save status register
1115 movl usp,a0 | grab USP
1116 movl a0,a1@(PCB_USP) | and save it
1117 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers (d2-d7/a2-a7)
1118
1119 tstl _fpu_type | Do we have FPU?
1120 jeq Lsavedone | No? Then don't save state.
1121 lea a1@(PCB_FPCTX),a0 | pointer to FP save area
1122 fsave a0@ | save FP state
1123 tstb a0@ | null state frame? (FPU idle: nothing more to save)
1124 jeq Lsavedone | yes, all done
1125 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1126 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1127 Lsavedone:
1128 moveq #0,d0 | return 0
1129 rts
1130
1131 /* suline() `040 only */
1132
1133 #ifdef DEBUG
1134 .data
1135 .globl fulltflush, fullcflush
 | Debug knobs: when fulltflush is nonzero, the TBI* routines below
 | flush the entire TLB instead of selected entries.  fullcflush is
 | presumably the analogous switch for cache flushes -- no user is
 | visible in this chunk; confirm against the rest of the file.
1136 fulltflush:
1137 .long 0
1138 fullcflush:
1139 .long 0
1140 .text
1141 #endif
1142
1143 /*
1144  * Invalidate entire TLB.
 * Also invalidates the on-chip data cache (via CACR).
1145  */
1146 ENTRY(TBIA)
1147 __TBIA: | local entry used by the conservative-flush paths below
1148 pflusha | flush all MMU address-translation (TLB) entries
1149 movl #DC_CLEAR,d0
1150 movc d0,cacr | invalidate on-chip d-cache
1151 rts
1152
1153 /*
1154  * Invalidate any TLB entry for given VA (TB Invalidate Single)
 * Arg 1: virtual address whose translation is to be flushed.
1155  */
1156 ENTRY(TBIS)
1157 #ifdef DEBUG
1158 tstl fulltflush | being conservative?
1159 jne __TBIA | yes, flush entire TLB
1160 #endif
1161 movl sp@(4),a0 | a0 = VA to invalidate
1162 pflush #0,#0,a0@ | flush address from both sides
1163 movl #DC_CLEAR,d0
1164 movc d0,cacr | invalidate on-chip data cache
1165 rts
1166
1167 /*
1168  * Invalidate supervisor side of TLB
1169  */
1170 ENTRY(TBIAS)
1171 #ifdef DEBUG
1172 tstl fulltflush | being conservative?
1173 jne __TBIA | yes, flush everything
1174 #endif
1175 pflush #4,#4 | flush supervisor TLB entries (FC=4, mask=4)
1176 movl #DC_CLEAR,d0
1177 movc d0,cacr | invalidate on-chip d-cache
1178 rts
1179
1180 /*
1181  * Invalidate user side of TLB
1182  */
1183 ENTRY(TBIAU)
1184 #ifdef DEBUG
1185 tstl fulltflush | being conservative?
1186 jne __TBIA | yes, flush everything
1187 #endif
1188 pflush #0,#4 | flush user TLB entries (FC=0, mask=4)
1189 movl #DC_CLEAR,d0
1190 movc d0,cacr | invalidate on-chip d-cache
1191 rts
1192
1193 /*
1194  * Invalidate instruction cache
1195  */
1196 ENTRY(ICIA)
1197 movl #IC_CLEAR,d0
1198 movc d0,cacr | invalidate i-cache
1199 rts
1200
1201 /*
1202  * Invalidate data cache.
1203  * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
1204  * problems with DC_WA. The only cases we have to worry about are context
1205  * switch and TLB changes, both of which are handled "in-line" in resume
1206  * and TBI*.
 *
 * Hence both entry points below are deliberate no-ops.
1207  */
1208 ENTRY(DCIA)
1209 __DCIA:
1210 rts | intentionally empty (see NOTE above)
1211
1212 ENTRY(DCIS)
1213 __DCIS:
1214 rts | intentionally empty (see NOTE above)
1215
1216 /*
1217  * Invalidate data cache.
 * Deliberate no-op: see the NOTE above DCIA.
1218  */
1219 ENTRY(DCIU)
1220 rts
1221
1222 /* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
1223
 | Purge/invalidate physical cache entries: implemented here as a
 | full on-chip d-cache invalidate.
1224 ENTRY(PCIA)
1225 movl #DC_CLEAR,d0
1226 movc d0,cacr | invalidate on-chip d-cache
1227 rts
1228
 | External-cache enable/disable: no-ops here -- presumably this
 | machine has no external cache to manage; confirm per sun3 model.
1229 ENTRY(ecacheon)
1230 rts
1231
1232 ENTRY(ecacheoff)
1233 rts
1234
1235 /*
1236  * Get callers current SP value.
1237  * Note that simply taking the address of a local variable in a C function
1238  * doesn't work because callee saved registers may be outside the stack frame
1239  * defined by A6 (e.g. GCC generated code).
1240  *
1241  * [I don't think the ENTRY() macro will do the right thing with this -- glass]
1242  */
1243 .globl _getsp
1244 _getsp:
1245 movl sp,d0 | get current SP
1246 addql #4,d0 | compensate for return address
1247 rts
1248
 | Return the source function code register (SFC) in d0.
1249 ENTRY(getsfc)
1250 movc sfc,d0
1251 rts
1252
 | Return the destination function code register (DFC) in d0.
1253 ENTRY(getdfc)
1254 movc dfc,d0
1255 rts
1256
 | Return the vector base register (VBR) in d0.
1257 ENTRY(getvbr)
1258 movc vbr, d0
1259 rts
1260
 | setvbr(va): load arg1 into the vector base register.
1261 ENTRY(setvbr)
1262 movl sp@(4), d0 | d0 = arg1: new VBR value
1263 movc d0, vbr
1264 rts
1265
1266 /*
1267  * Load a new CPU Root Pointer (CRP) into the MMU.
1268  * void loadcrp(struct mmu_rootptr *);
 *
 * Caches and TLB are invalidated first so no stale translations or
 * cached data survive the address-space change.
1269  */
1270 ENTRY(loadcrp)
1271 movl sp@(4),a0 | arg1: &CRP
1272 movl #CACHE_CLR,d0
1273 movc d0,cacr | invalidate cache(s)
1274 pflusha | flush entire TLB
1275 pmove a0@,crp | load new user root pointer
1276 rts
1277
1278 /*
1279  * Set processor priority level calls. Most are implemented with
1280  * inline asm expansions. However, we need one instantiation here
1281  * in case some non-optimized code makes external references.
1282  * Most places will use the inlined function param.h supplies.
 *
 * _spl(newsr): load arg1 into SR; return the previous SR value in
 * d0 (upper 16 bits cleared).
1283  */
1284
1285 ENTRY(_spl)
1286 movl sp@(4),d1 | d1 = arg1: new SR value
1287 clrl d0 | clear upper word of return value
1288 movw sr,d0 | d0 = old SR
1289 movw d1,sr | install new SR
1290 rts
1291
 | Return the current status register (SR) in d0, zero-extended.
1292 ENTRY(getsr)
1293 moveq #0, d0
1294 movw sr, d0
1295 rts
1296
 | _insque(e, after): insert element e into a doubly-linked queue
 | immediately after 'after'.  Runs at PSL_HIGHIPL for atomicity;
 | previous interrupt level is restored on exit.
1297 ENTRY(_insque)
1298 movw sr,d0 | remember current interrupt level
1299 movw #PSL_HIGHIPL,sr | atomic
1300 movl sp@(8),a0 | where to insert (after)
1301 movl sp@(4),a1 | element to insert (e)
1302 movl a0@,a1@ | e->next = after->next
1303 movl a0,a1@(4) | e->prev = after
1304 movl a1,a0@ | after->next = e
1305 movl a1@,a0 | a0 = e->next
1306 movl a1,a0@(4) | e->next->prev = e
1307 movw d0,sr | restore interrupt level
1308 rts
1309
 | _remque(e): unlink element e from its doubly-linked queue.
 | Runs at PSL_HIGHIPL for atomicity; previous level restored on exit.
1310 ENTRY(_remque)
1311 movw sr,d0 | remember current interrupt level
1312 movw #PSL_HIGHIPL,sr | atomic
1313 movl sp@(4),a0 | element to remove (e)
1314 movl a0@,a1 | a1 = e->next
1315 movl a0@(4),a0 | a0 = e->prev
1316 movl a0,a1@(4) | e->next->prev = e->prev
1317 movl a1,a0@ | e->prev->next = e->next
1318 movw d0,sr | restore interrupt level
1319 rts
1320
1321 /*
1322  * Save and restore 68881 state.
 *
 * m68881_save(fpframe *): fsave the FPU state into the given area;
 * if the frame is non-null (FPU not idle) also save the FP data and
 * control registers.
1323  */
1324 ENTRY(m68881_save)
1325 movl sp@(4),a0 | save area pointer
1326 fsave a0@ | save state
1327 tstb a0@ | null state frame?
1328 jeq Lm68881sdone | yes, all done
1329 fmovem fp0-fp7,a0@(FPF_REGS) | save FP general regs
1330 fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR) | save FP control regs
1331 Lm68881sdone:
1332 rts
1333
 | m68881_restore(fpframe *): inverse of m68881_save.  Restore FP
 | control/data registers first when the frame is non-null, then
 | frestore the internal state.
1334 ENTRY(m68881_restore)
1335 movl sp@(4),a0 | save area pointer
1336 tstb a0@ | null state frame?
1337 jeq Lm68881rdone | yes, easy
1338 fmovem a0@(FPF_FPCR),fpcr/fpsr/fpi | restore FP control regs
1339 fmovem a0@(FPF_REGS),fp0-fp7 | restore FP general regs
1340 Lm68881rdone:
1341 frestore a0@ | restore state
1342 rts
1343
1344 /*
1345  * _delay(unsigned N)
1346  * Delay for at least (N/256) microseconds.
1347  * This routine depends on the variable: delay_divisor
1348  * which should be set based on the CPU clock rate.
1349  * XXX: Currently this is set in sun3_startup.c based on the
1350  * XXX: CPU model but this should be determined at run time...
 *
 * Busy-waits by repeatedly subtracting delay_divisor from the
 * scaled argument until it goes non-positive.
1351  */
1352 .globl __delay
1353 __delay:
1354 | d0 = arg = (usecs << 8)
1355 movl sp@(4),d0
1356 | d1 = delay_divisor;
1357 movl _delay_divisor,d1
1358 L_delay:
1359 subl d1,d0 | count down by divisor per loop iteration
1360 jgt L_delay | spin until d0 <= 0
1361 rts
1362
1363
1364 | Define some addresses, mostly so DDB can print useful info.
 | These are assembler symbols (.set), not memory locations: each
 | global name is bound to the corresponding constant from headers.
1365 .globl _kernbase
1366 .set _kernbase,KERNBASE | start of kernel virtual space
1367 .globl _dvma_base
1368 .set _dvma_base,DVMA_SPACE_START | start of DVMA region
1369 .globl _prom_start
1370 .set _prom_start,MONSTART | start of PROM monitor area
1371 .globl _prom_base
1372 .set _prom_base,PROM_BASE | PROM base address
1373
1374 |The end!
1375