/* $NetBSD: locore.s,v 1.69.4.1 2024/06/27 19:27:28 martin Exp $ */

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)
| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE3X,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
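| (For reference: per the MC68030 TT register layout, 0x3F8107 should decode
| as address base 0x00 with mask 0x3F, i.e. 0x00000000-0x3FFFFFFF, E=1 to
| enable, RWM=1 to match both reads and writes, and an FC mask of 7 to match
| all function codes -- the low 1GB mapped v=p, as noted above.)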
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for
| the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
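| (An a.out struct exec is 32 bytes, so starting the stack at tmpstk-32
| presumably both protects that copy and leaves %sp pointing at it for
| _bootstrap() to find.)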
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| get lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK3X-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
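	| (The words pushed above plus the 64-byte register save area
	| should now form a frame shaped like the m68k struct trapframe:
	| regs, stack-adjust word, sr, pc, format/vector -- see
	| <m68k/frame.h> for the authoritative layout.)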
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.l_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	| longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		| the user stack pointer
	clrl	%sp@-			| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	%d0			| no, need to tweak BIU
	movb	%a0@(1),%d0		| get frame size
	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-		| push fpsr as code argument
	frestore %a0@			| restore state
	movl	#T_FPERR,%sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup

/*
 * Other exceptions only cause four and six word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		| in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		| user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
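| (The pushes below build the C argument list right to left, so the call
| should reach C as roughly cachectl1(command, addr, length, p); see the
| m68k cachectl1() definition for the authoritative prototype.)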
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 * - GDB breakpoints (in user programs)
 * - KGDB breakpoints (in the kernel)
 * - trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	| yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		| from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
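	| (tmpstk ends at 'start' and grows downward, so a frame address
	| numerically at or below the tmpstk label is presumed to already
	| be on the temporary stack.)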
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *	%d0,%d1,%a0,%a1, sr, pc, vo
 */

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
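| (e.g. an autovectored level-5 interrupt uses vector 0x18+5 = 0x1d,
| i.e. offset 0x74 in the vector table.)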
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		| the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		| from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			| + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>

/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
	movl	%d0,%a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
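| (Loading a new root pointer switches the entire translation tree, so the
| code below invalidates the caches and flushes the ATC first to avoid
| using stale translations.)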
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
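| (ptestr below uses function code 5 (supervisor data) and level #7, which
| walks the translation tables for a read at the given VA and leaves the
| address of the last descriptor fetched in %a0.)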
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */
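| (For example, s = _spl(PSL_S|PSL_IPL7) would block all interrupts and
| return the previous %sr so the caller can later restore it with _spl(s);
| illustrative only -- most code uses the spl*()/splx() inlines instead.)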

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
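| (The loop below subtracts delay_divisor from (usecs << 8) until the result
| goes non-positive, i.e. roughly (usecs << 8) / delay_divisor iterations;
| delay_divisor is thus tuned so that one iteration costs about
| delay_divisor/256 microseconds on the machine at hand.)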
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

/*
 * Align the branch target of the loop to a half-line (8-byte)
 * boundary to minimize cache effects.  This guarantees both
 * that there will be no prefetch stalls due to cache line burst
 * operations and that the loop will run from a single cache
 * half-line.
 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

|The end!