locore.s revision 1.66 1 /* $NetBSD: locore.s,v 1.66 2018/12/19 13:57:51 maxv Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: Utah $Hdr: locore.s 1.66 92/12/22$
37 * @(#)locore.s 8.6 (Berkeley) 5/27/94
38 */
39
40 #include "opt_compat_netbsd.h"
41 #include "opt_compat_sunos.h"
42 #include "opt_kgdb.h"
43 #include "opt_lockdebug.h"
44
45 #include "assym.h"
46 #include <machine/asm.h>
47 #include <machine/trap.h>
48
49 | Remember this is a fun project!
50
	.data
| Save area for the PROM monitor's CPU Root Pointer (CRP); filled in by
| the startup code below (also briefly borrowed there for tt0 setup).
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)
64
| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE3X,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0			| convert linked VA to loaded PA
| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump
100
L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled
| for the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6			| zero frame pointer ends backtraces
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| get lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK3X-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.l_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")
156
157 | That is all the assembly startup code we need on the sun3x!
158 | The rest of this is like the hp300/locore.s where possible.
159
160 /*
161 * Trap/interrupt vector routines
162 */
163 #include <m68k/m68k/trap_subr.s>
164
| Bus error handler.  During device probes (nofault != 0) we longjmp
| back to the prober instead of treating the fault as an error.
| Otherwise fall through to the address error handler, which decodes
| the 68030 bus-error stack frame and special status word (SSW) to
| recover the fault address, then classifies the fault via an MMU
| table search (ptestr) before dispatching to trap().
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	| longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we dont separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
257
258 /*
259 * FP exceptions.
260 */
| FP line-F and unsupported-datatype exceptions: both are handed to the
| kernel FP emulator via the common fault path, distinguished by trap type.
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
272
273 /*
274 * Handles all other FP coprocessor exceptions.
275 * Note that since some FP exceptions generate mid-instruction frames
276 * and may cause signal delivery, we need to test for stack adjustment
277 * after the trap call.
278 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		| the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
298
299 /*
300 * Other exceptions only cause four and six word stack frame and require
301 * no post-trap stack adjustment.
302 */
| Catch-all for unexpected traps: report via straytrap() and return.
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
310
311 /*
312 * Trap 0 is for system calls
313 */
| System call entry: syscall number arrives in %d0.
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
327
328 /*
329 * Trap 12 is the entry point for the cachectl "syscall"
330 * cachectl(command, addr, length)
331 * command in d0, addr in a1, length in d1
332 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
342
343 /*
344 * Trace (single-step) trap. Kernel-mode is special.
345 * User mode traps are simply passed on to trap().
346 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

| Check PSW and see what happened.
|   T=0 S=0	(should not happen)
|   T=1 S=0	trace trap from user mode
|   T=0 S=1	trace trap on a trap instruction
|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
363
364 /*
365 * Trap 15 is used for:
366 * - GDB breakpoints (in user programs)
367 * - KGDB breakpoints (in the kernel)
368 * - trace traps for SUN binaries (not fully supported yet)
369 * User mode traps are simply passed to trap().
370 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
378
ASLOCAL(kbrkpt)
| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		| from before trap

| If we are not on tmpstk switch to it.
| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1	| %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1		| %d1 = bytes left to copy
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
| Call the trap handler for the kernel debugger.
| Do not call trap() to handle it, so that we can
| set breakpoints in trap() if we want.  We know
| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

| The stack pointer may have been modified, or
| data below it modified (by kgdb push call),
| so push the hardware frame at the current sp
| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done
422
423 /* Use common m68k sigreturn */
424 #include <m68k/m68k/sigreturn.s>
425
426 /*
427 * Interrupt handlers. Most are auto-vectored,
428 * and hard-wired the same way on all sun3 models.
429 * Format in the stack is:
430 * %d0,%d1,%a0,%a1, sr, pc, vo
431 */
432
433 /*
434 * This is the common auto-vector interrupt handler,
435 * for which the CPU provides the vector=0x18+level.
436 * These are installed in the interrupt vector table.
437 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)
448
449 /* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)
460
| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTOREREG
475
476 /* interrupt counters (needed by vmstat) */
/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"		| 0
	.asciz	"lev1"		| 1
	.asciz	"lev2"		| 2
	.asciz	"lev3"		| 3
	.asciz	"lev4"		| 4
	.asciz	"clock"		| 5
	.asciz	"lev6"		| 6
	.asciz	"nmi"		| 7
GLOBAL(eintrnames)

	.data
	.even
| NOTE(review): 10 counter slots vs. 8 names above — presumably the
| extra slots are deliberate padding; confirm against vmstat usage.
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
494
495 /*
496 * Emulation of VAX REI instruction.
497 *
498 * This code is (mostly) un-altered from the hp300 code,
499 * except that sun machines do not need a simulated SIR
500 * because they have a real software interrupt register.
501 *
502 * This code deals with checking for and servicing ASTs
503 * (profiling, scheduling) and software interrupts (network, softclock).
504 * We check for ASTs first, just like the VAX. To avoid excess overhead
505 * the T_ASTFLT handling code will also check for software interrupts so we
506 * do not have to do it here. After identifying that we need an AST we
507 * drop the IPL to allow device interrupts.
508 *
509 * This code is complicated by the fact that sendsig may have been called
510 * necessitating a stack cleanup.
511 */
512
ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return
553
554 /*
555 * Initialization is at the beginning of this file, because the
556 * kernel entry point needs to be at zero for compatibility with
557 * the Sun boot loader. This works on Sun machines because the
558 * interrupt vector table for reset is NOT at address zero.
559 * (The MMU has a "boot" bit that forces access to the PROM)
560 */
561
562 /*
563 * Use common m68k sigcode.
564 */
565 #include <m68k/m68k/sigcode.s>
566 #ifdef COMPAT_SUNOS
567 #include <m68k/m68k/sunos_sigcode.s>
568 #endif
569
570 .text
571
572 /*
573 * Primitives
574 */
575
576 /*
577 * Use common m68k support routines.
578 */
579 #include <m68k/m68k/support.s>
580
581 /*
582 * Use common m68k process/lwp switch and context save subroutines.
583 */
584 #define FPCOPROC /* XXX: Temp. Reqd. */
585 #include <m68k/m68k/switch_subr.s>
586
587
/* suline() */

#ifdef DEBUG
	.data
| Debug knobs: force full TLB / cache flushes when nonzero.
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

| No external cache on this machine: both are no-ops kept for
| interface compatibility with other m68k ports.
ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts
604
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0			| pointer return also goes in %a0
	rts
618
| Read / write the vector base register (VBR).
ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0			| pointer return also goes in %a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0		| arg1: new VBR value
	movc	%d0,%vbr
	rts
628
/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

| Read the current CRP into *crpp.
ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts
645
/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts
654
655 /*
656 * Set processor priority level calls. Most are implemented with
657 * inline asm expansions. However, we need one instantiation here
658 * in case some non-optimized code makes external references.
659 * Most places will use the inlined functions param.h supplies.
660 */
661
/*
 * u_int _getsr(void) -- return the current status register value.
 * The stray "movl %a1,%d0" that followed the SR read clobbered the
 * return value with garbage (%a1 carries no argument for a
 * zero-argument function on m68k), so the caller got junk; removed.
 */
ENTRY(_getsr)
	clrl	%d0			| clear upper half of result
	movw	%sr,%d0			| return SR in %d0
	rts
667
| Set the SR (interrupt priority) to the given value unconditionally;
| returns the previous SR in %d0.
ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0			| %d0 = old SR (return value)
	movl	%sp@(4),%d1		| arg1: new SR value
	movw	%d1,%sr
	rts
674
| Raise the interrupt priority to the given level, but never lower it;
| returns the previous SR in %d0.
ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0			| %d0 = old SR (return value)
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr			| already at or above requested level
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
686
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0			| count down by delay_divisor
	jgt	L_delay
	rts
718
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND
727
728 |The end!
729