/*	$NetBSD: exception_vector.S,v 1.47 2011/01/28 21:06:08 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_ptrace.h"

#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.47 2011/01/28 21:06:08 uwe Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
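/*
 * Illustrative sketch only (not code from this file): the MD startup
 * code installs these vectors roughly as below, using the *_end
 * markers defined in this file to compute each vector's size.  The
 * copy mechanism and the CPU-specific choice of tlbmiss vector are
 * assumptions here.
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *	memcpy((void *)(vbr + 0x400), sh3_vector_tlbmiss,	// or sh4_...
 *	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
 *	memcpy((void *)(vbr + 0x600), sh_vector_interrupt,
 *	    sh_vector_interrupt_end - sh_vector_interrupt);
 */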

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
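/*
 * Rough C equivalent of the dispatch below (a sketch only; the
 * PTRACE/DDB single-step UBC handling is omitted):
 *
 *	tf->tf_expevt = *EXPEVT;
 *	if (*EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, *TEA);
 *	else {
 *		general_exception(curlwp, tf, *TEA);
 *		ast(curlwp, tf);	// check ASTs on return to user
 *	}
 */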
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check whether this is a TLB exception */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	.Lg_return_from_exception
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#if defined(PTRACE) || defined(DDB)
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
	mov.w	r2, @r1			/* disable UBC channel A */
#endif
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5

#if defined(PTRACE) || defined(DDB)
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do single step.  Channel A in
	!! UBC is already rigged, we just need to enable it.
	MOV	(BBRA, r3)
	MOV	(BARA, r5)
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_SPC, r14), r4
	mov.l	r4, @r5			! BARA = tf->tf_spc
	mov.w	r2, @r3			! BBRA = tf->tf_ubc
#endif /* PTRACE || DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
REG_SYMBOL(EXPEVT)
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(TEA)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
REG_SYMBOL(BBRA)
REG_SYMBOL(BARA)

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (including no
 * kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
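/*
 * The fast path below corresponds roughly to this C sketch (modeled
 * on __pmap_pte_lookup()/__pmap_kpte_lookup(); macro and field
 * spellings here are approximate):
 *
 *	va = *SH3_PTEH & ~0x0c00;	// { 4K-aligned VPN, ASID }
 *	if (kernel address)
 *		ptd = __pmap_kernel.pm_ptp,  va -= VM_MIN_KERNEL_ADDRESS;
 *	else
 *		ptd = curptd;
 *	ptp = ptd[__PMAP_PTP_INDEX(va)];
 *	if (ptp == NULL || ((pte = ptp[...]) & PG_V) == 0)
 *		goto slow path (.L3_call_tlb_exception);
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = va;		// ASID forced to 0 for kernel pages
 *	ldtlb;
 */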
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (though we can
 * use kernel stack if need be, as its TLB entries are wired).  We can
 * only use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be relocatable
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
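/*
 * Same fast path as the sh3 version above.  The sh4-specific extra
 * step is deriving the PCMCIA space attribute bits for SH4_PTEA from
 * the pte; roughly (a sketch only, shift/mask spellings approximate):
 *
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 *	*SH4_PTEL = pte & PG_HW_BITS;
 */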
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
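/*
 * Rough C equivalent of the code below (a sketch only; curcpu() and
 * the PSL_MD test are spelled the way the inline comments below do):
 *
 *	idepth = ++curcpu()->ci_idepth;
 *	tf->tf_expevt = 0;			// for debugging
 *	intc_intr(ssr, spc, ssp);
 *	curcpu()->ci_idepth--;
 *	if (idepth > 0 || (tf->tf_ssr & PSL_MD) != 0) {
 *		// returning to kernel: if we interrupted the _lock_cas
 *		// restartable atomic sequence, arrange to restart it
 *		if (_lock_cas_ras_start < tf->tf_spc &&
 *		    tf->tf_spc < _lock_cas_ras_end)
 *			tf->tf_spc = _lock_cas_ras_start;
 *	} else
 *		ast(curlwp, tf);		// returning to user
 */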
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r9, @r8

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)
523