locore.s revision 1.62 1 /* $NetBSD: locore.s,v 1.62 2011/02/08 20:20:25 rmind Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: Utah $Hdr: locore.s 1.66 92/12/22$
37 * @(#)locore.s 8.6 (Berkeley) 5/27/94
38 */
39
40 #include "opt_compat_netbsd.h"
41 #include "opt_compat_svr4.h"
42 #include "opt_compat_sunos.h"
43 #include "opt_kgdb.h"
44 #include "opt_lockdebug.h"
45
46 #include "assym.h"
47 #include <machine/asm.h>
48 #include <machine/trap.h>
49
| Save area (two longwords) for the PROM monitor's CPU root pointer;
| filled in by the startup code below via "pmove %crp,...".
	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)
65
| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE3X,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0			| convert linked (high) VA to loaded (low) PA
| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
					| (hand-assembled; see MC68030 manual)
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled
| for the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6			| zero frame pointer ends backtraces
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| get lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK3X-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
| NOTE(review): %a1 here still holds the uarea/pcb pointer loaded above,
| and no &trapframe argument is pushed before the call to main() --
| this region looks abbreviated relative to upstream; confirm the
| trapframe pointer setup against the original NetBSD sun3x locore.s.
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.l_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")
157
158 | That is all the assembly startup code we need on the sun3x!
159 | The rest of this is like the hp300/locore.s where possible.
160
161 /*
162 * Trap/interrupt vector routines
163 */
164 #include <m68k/m68k/trap_subr.s>
165
/*
 * Bus error and address error handlers.
 *
 * If a device probe is in progress (nofault != 0), a bus error is
 * expected: longjmp() back through the saved context instead of
 * treating it as a fault.  Otherwise decode the 68030 exception
 * stack frame and special status word (SSW) to recover the fault
 * address, classify the fault (MMU fault, address error, or true
 * bus error), and hand off to the common faultstkadj code.
 */
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	| longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we dont separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
258
259 /*
260 * FP exceptions.
261 */
| Unimplemented FP instruction (F-line) trap: route to the common
| fault code tagged as an FP-emulation trap.
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
267
| Unsupported FP data type trap: route to the common fault code
| tagged as an FP-emulation (data) trap.
GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
273
/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call (hence the jump to faultstkadj).
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		| the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
299
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.  Report the stray trap and return.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
311
/*
 * Trap 0 is for system calls: save the frame, call syscall() with
 * the syscall number from %d0, then restore and return via rei.
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
328
/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
343
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
364
/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
379
ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		| from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1		| %d1 = bytes remaining to copy
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done
423
424 /* Use common m68k sigreturn */
425 #include <m68k/m68k/sigreturn.s>
426
427 /*
428 * Interrupt handlers. Most are auto-vectored,
429 * and hard-wired the same way on all sun3 models.
430 * Format in the stack is:
431 * %d0,%d1,%a0,%a1, sr, pc, vo
432 */
433
| Save/restore the caller-saved registers %d0-%d1/%a0-%a1 around a
| C interrupt handler call.  (moveml mask 0xC0C0 in predecrement mode
| and mask 0x0303 in postincrement mode name the same four registers.)
#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
439
440 /*
441 * This is the common auto-vector interrupt handler,
442 * for which the CPU provides the vector=0x18+level.
443 * These are installed in the interrupt vector table.
444 */
| Common auto-vector interrupt stub: save scratch regs, call the C
| handler isr_autovec(), restore, and return via rei.
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)
455
456 /* clock: see clock.c */
| Clock interrupt stub: save scratch regs, call clock_intr() (see
| clock.c), restore, and return via rei.
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)
467
468 | Handler for all vectored interrupts (i.e. VME interrupts)
| Handler for all vectored interrupts (i.e. VME interrupts):
| save scratch regs, call isr_vectored(), restore, return via rei.
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)
479
480 #undef INTERRUPT_SAVEREG
481 #undef INTERRUPT_RESTORE
482
483 /* interrupt counters (needed by vmstat) */
/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
| NOTE(review): 10 counter slots here vs. 8 names above -- confirm
| against vmstat's pairing of [intrcnt,eintrcnt) with
| [intrnames,eintrnames); the extra slots may be intentional.
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
501
502 /*
503 * Emulation of VAX REI instruction.
504 *
505 * This code is (mostly) un-altered from the hp300 code,
506 * except that sun machines do not need a simulated SIR
507 * because they have a real software interrupt register.
508 *
509 * This code deals with checking for and servicing ASTs
510 * (profiling, scheduling) and software interrupts (network, softclock).
511 * We check for ASTs first, just like the VAX. To avoid excess overhead
512 * the T_ASTFLT handling code will also check for software interrupts so we
513 * do not have to do it here. After identifying that we need an AST we
514 * drop the IPL to allow device interrupts.
515 *
516 * This code is complicated by the fact that sendsig may have been called
517 * necessitating a stack cleanup.
518 */
519
| Common exception return path: if an AST is pending and we are
| returning to user mode, build a T_ASTFLT frame and call trap(),
| handling any sendsig()-induced stack adjustment; otherwise rte.
ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		| the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		| from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			| + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		| 8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return
560
561 /*
562 * Initialization is at the beginning of this file, because the
563 * kernel entry point needs to be at zero for compatibility with
564 * the Sun boot loader. This works on Sun machines because the
565 * interrupt vector table for reset is NOT at address zero.
566 * (The MMU has a "boot" bit that forces access to the PROM)
567 */
568
569 /*
570 * Use common m68k sigcode.
571 */
572 #include <m68k/m68k/sigcode.s>
573 #ifdef COMPAT_SUNOS
574 #include <m68k/m68k/sunos_sigcode.s>
575 #endif
576 #ifdef COMPAT_SVR4
577 #include <m68k/m68k/svr4_sigcode.s>
578 #endif
579
580 .text
581
582 /*
583 * Primitives
584 */
585
586 /*
587 * Use common m68k support routines.
588 */
589 #include <m68k/m68k/support.s>
590
591 /*
592 * Use common m68k process/lwp switch and context save subroutines.
593 */
594 #define FPCOPROC /* XXX: Temp. Reqd. */
595 #include <m68k/m68k/switch_subr.s>
596
597
598 /* suline() */
599
| Debug knobs: when set non-zero, request full TLB / cache flushes
| in the cache/TLB maintenance code (referenced only under DEBUG).
#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif
608
| External cache enable/disable entry points: stubs here, as this
| port has nothing to do for them.
ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts
614
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0			| return pointer in %a0 as well
	rts
628
| Return the source function code register (%sfc) in %d0/%a0.
ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0			| copy for pointer-return callers
	rts
633
| Return the destination function code register (%dfc) in %d0/%a0.
ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0			| copy for pointer-return callers
	rts
638
| Return the vector base register (%vbr) in %d0/%a0.
ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0			| copy for pointer-return callers
	rts
643
| Set the vector base register: setvbr(new_vbr).
ENTRY(setvbr)
	movl	%sp@(4),%d0		| arg1: new VBR value
	movc	%d0,%vbr
	rts
648
/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 * Invalidates the caches and flushes the TLB before loading,
 * since the old translations become stale.
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts
660
| Store the current CPU Root Pointer: getcrp(&crp).
ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts
665
/*
 * Get the physical address of the PTE for a given VA,
 * using the MMU table-search (ptestr) instruction.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts
674
675 /*
676 * Set processor priority level calls. Most are implemented with
677 * inline asm expansions. However, we need one instantiation here
678 * in case some non-optimized code makes external references.
679 * Most places will use the inlined functions param.h supplies.
680 */
681
| Return the current status register (%sr), zero-extended, in %d0.
| Fix: the previous version had a stray "movl %a1,%d0" after reading
| %sr, which overwrote the return value with whatever garbage was in
| %a1.  (%sr is a 16-bit data value returned in %d0; the "copy to
| %a0" pattern used by the pointer-returning getters does not apply.)
ENTRY(_getsr)
	clrl	%d0			| clear upper half; SR is 16 bits
	movw	%sr,%d0			| return SR in low word of %d0
	rts
687
| _spl(new_sr): unconditionally set %sr to the given value and
| return the previous %sr (zero-extended) in %d0.
ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0			| old SR is the return value
	movl	%sp@(4),%d1
	movw	%d1,%sr			| install new SR
	rts
694
| _splraise(new_sr): set %sr to the given value only if it raises
| the interrupt priority; never lowers it.  Returns old %sr in %d0.
ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0			| old SR is the return value
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1 	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr			| already at or above: skip
	movl	%sp@(4),%d1
	movw	%d1,%sr			| raise to the new level
Lsplr:
	rts
706
/*
 * Save and restore 68881 state.
 * m68881_save(savearea): fsave the internal state; if it is not a
 * null frame, also dump the FP data and control registers.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs
Lm68881sdone:
	rts
719
| m68881_restore(savearea): inverse of m68881_save().  Reload the FP
| control and data registers (unless the frame is null), then
| frestore the internal state.
ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts
729
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0			| count down by delay_divisor
	jgt	L_delay
	rts
761
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND
770
| The end!
772