/*	$NetBSD: locore.s,v 1.68 2021/03/31 09:48:15 simonb Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */
39
40 #include "opt_compat_netbsd.h"
41 #include "opt_compat_sunos.h"
42 #include "opt_kgdb.h"
43 #include "opt_lockdebug.h"
44
45 #include "assym.h"
46 #include <machine/asm.h>
47 #include <machine/trap.h>
48
49 | Remember this is a fun project!
50
51 .data
52 GLOBAL(mon_crp)
53 .long 0,0
54
55 | This is for kvm_mkdb, and should be the address of the beginning
56 | of the kernel text segment (not necessarily the same as kernbase).
57 .text
58 GLOBAL(kernel_text)
59
60 | This is the entry point, as well as the end of the temporary stack
61 | used during process switch (one 8K page ending at start)
62 ASGLOBAL(tmpstk)
63 ASGLOBAL(start)
64
65 | The first step, after disabling interrupts, is to map enough of the kernel
66 | into high virtual address space so that we can use position dependent code.
67 | This is a tricky task on the sun3x because the MMU is already enabled and
68 | the ROM monitor provides no indication of where the root MMU table is mapped.
69 | Therefore we must use one of the 68030's 'transparent translation' registers
70 | to define a range in the address space where the MMU translation is
71 | turned off. Once this is complete we can modify the MMU table directly
72 | without the need for it to be mapped into virtual memory.
73 | All code must be position independent until otherwise noted, as the
74 | boot loader has loaded us into low memory but all the symbols in this
75 | code have been linked high.
76 movw #PSL_HIGHIPL,%sr | no interrupts
77 movl #KERNBASE3X,%a5 | for vtop conversion
78 lea _C_LABEL(mon_crp),%a0 | where to store the CRP
79 subl %a5,%a0
80 | Note: borrowing mon_crp for tt0 setup...
81 movl #0x3F8107,%a0@ | map the low 1GB v=p with the
82 .long 0xf0100800 | transparent translation reg0
83 | [ pmove a0@, tt0 ]
84 | In order to map the kernel into high memory we will copy the root table
85 | entry which maps the 16 megabytes of memory starting at 0x0 into the
86 | entry which maps the 16 megabytes starting at KERNBASE.
87 pmove %crp,%a0@ | Get monitor CPU root pointer
88 movl %a0@(4),%a1 | 2nd word is PA of level A table
89
90 movl %a1,%a0 | compute the descriptor address
91 addl #0x3e0,%a1 | for VA starting at KERNBASE
92 movl %a0@,%a1@ | copy descriptor type
93 movl %a0@(4),%a1@(4) | copy physical address
94
95 | Kernel is now double mapped at zero and KERNBASE.
96 | Force a long jump to the relocated code (high VA).
97 movl #IC_CLEAR,%d0 | Flush the I-cache
98 movc %d0,%cacr
99 jmp L_high_code:l | long jump
100
101 L_high_code:
102 | We are now running in the correctly relocated kernel, so
103 | we are no longer restricted to position-independent code.
104 | It is handy to leave transparent translation enabled while
105 | for the low 1GB while _bootstrap() is doing its thing.
106
107 | Do bootstrap stuff needed before main() gets called.
108 | Our boot loader leaves a copy of the kernel's exec header
109 | just before the start of the kernel text segment, so the
110 | kernel can sanity-check the DDB symbols at [end...esym].
111 | Pass the struct exec at tmpstk-32 to _bootstrap().
112 | Also, make sure the initial frame pointer is zero so that
113 | the backtrace algorithm used by KGDB terminates nicely.
114 lea _ASM_LABEL(tmpstk)-32,%sp
115 movl #0,%a6
116 jsr _C_LABEL(_bootstrap) | See locore2.c
117
118 | Now turn off the transparent translation of the low 1GB.
119 | (this also flushes the ATC)
120 clrl %sp@-
121 .long 0xf0170800 | pmove sp@,tt0
122 addql #4,%sp
123
124 | Now that _bootstrap() is done using the PROM functions,
125 | we can safely set the sfc/dfc to something != FC_CONTROL
126 moveq #FC_USERD,%d0 | make movs access "user data"
127 movc %d0,%sfc | space for copyin/copyout
128 movc %d0,%dfc
129
130 | Setup process zero user/kernel stacks.
131 lea _C_LABEL(lwp0),%a0 | get lwp0
132 movl %a0@(L_PCB),%a1 | XXXuvm_lwp_getuarea
133 lea %a1@(USPACE-4),%sp | set SSP to last word
134 movl #USRSTACK3X-4,%a2
135 movl %a2,%usp | init user SP
136
137 | Note curpcb was already set in _bootstrap().
138 | Will do fpu initialization during autoconfig (see fpu.c)
139 | The interrupt vector table and stack are now ready.
140 | Interrupts will be enabled later, AFTER autoconfiguration
141 | is finished, to avoid spurrious interrupts.
142
143 /*
144 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
145 * main() nevers returns; we exit to user mode from a forked process
146 * later on.
147 */
148 clrw %sp@- | tf_format,tf_vector
149 clrl %sp@- | tf_pc (filled in later)
150 movw #PSL_USER,%sp@- | tf_sr for user mode
151 clrl %sp@- | tf_stackadj
152 lea %sp@(-64),%sp | tf_regs[16]
153 movl %a1,%a0@(L_MD_REGS) | lwp0.l_md.md_regs = trapframe
154 jbsr _C_LABEL(main) | main(&trapframe)
155 PANIC("main() returned")
156
157 | That is all the assembly startup code we need on the sun3x!
158 | The rest of this is like the hp300/locore.s where possible.
159
160 /*
161 * Trap/interrupt vector routines
162 */
163 #include <m68k/m68k/trap_subr.s>
164
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	| longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
257
/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
266
GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
272
/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
298
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
310
/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
327
/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
342
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
363
/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
378
ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done
422
423 /* Use common m68k sigreturn */
424 #include <m68k/m68k/sigreturn.s>
425
/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)
448
/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)
460
| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)
472
/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"		| 0
	.asciz	"lev1"		| 1
	.asciz	"lev2"		| 2
	.asciz	"lev3"		| 3
	.asciz	"lev4"		| 4
	.asciz	"clock"		| 5
	.asciz	"lev6"		| 6
	.asciz	"nmi"		| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
491
/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we paniced?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return
550
551 /*
552 * Initialization is at the beginning of this file, because the
553 * kernel entry point needs to be at zero for compatibility with
554 * the Sun boot loader. This works on Sun machines because the
555 * interrupt vector table for reset is NOT at address zero.
556 * (The MMU has a "boot" bit that forces access to the PROM)
557 */
558
559 /*
560 * Use common m68k sigcode.
561 */
562 #include <m68k/m68k/sigcode.s>
563 #ifdef COMPAT_SUNOS
564 #include <m68k/m68k/sunos_sigcode.s>
565 #endif
566
567 .text
568
569 /*
570 * Primitives
571 */
572
573 /*
574 * Use common m68k support routines.
575 */
576 #include <m68k/m68k/support.s>
577
578 /*
579 * Use common m68k process/lwp switch and context save subroutines.
580 */
581 #define FPCOPROC /* XXX: Temp. Reqd. */
582 #include <m68k/m68k/switch_subr.s>
583
584
/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif
595
| No external cache to enable here; this is a no-op.
ENTRY(ecacheon)
	rts
598
| No external cache to disable here; this is a no-op.
ENTRY(ecacheoff)
	rts
601
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0			| value also in %a0 (pointer-return reg)
	rts
615
| Return the vector base register in %d0 (copy in %a0 as well).
ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts
620
| Load the vector base register from the first stack argument.
ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts
625
/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts
637
| Store the current CPU Root Pointer through the supplied pointer.
ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts
642
/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts
651
/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

| Return the current status register in %d0.
| (A stray "movl %a1,%d0" after reading %sr clobbered the result
| with an uninitialized address register; it has been removed.)
ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts
664
| Set SR to the first stack argument; return the previous SR in %d0.
ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts
671
| Raise SR to the first stack argument only if it is above the
| current IPL; return the previous SR in %d0.
ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr			| already at or above: leave SR alone
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
683
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts
715
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

|The end!
726