/*	$NetBSD: exception_vector.S,v 1.35 2008/02/14 22:46:14 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.35 2008/02/14 22:46:14 uwe Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
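/*
 * The MD bootstrap code copies these blocks to the addresses above,
 * using the *_end markers defined below to size each copy.  A minimal
 * sketch of that copy (an assumption, not the exact code in this tree;
 * "vbr" stands for the value programmed into the VBR register):
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *	memcpy((void *)(vbr + 0x400), sh3_vector_tlbmiss,	(or sh4_...)
 *	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
 *	memcpy((void *)(vbr + 0x600), sh_vector_interrupt,
 *	    sh_vector_interrupt_end - sh_vector_interrupt);
 */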

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
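/*
 * In C terms the flow below is approximately the following (a sketch;
 * argument setup and the DDB/UBC handling are omitted):
 *
 *	tf->tf_expevt = EXPEVT;
 *	if (EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, TEA);
 *	else {
 *		general_exception(curlwp, tf, TEA);
 *		ast(curlwp, tf);	(AST check on exit to user mode)
 *	}
 */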
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	2f
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#ifdef DDB
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.w	r2, @r1			/* disable UBC */
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5
#ifdef DDB	/* BBRA = tf->tf_ubc */
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_UBC, r14), r0
	MOV	(BBRA, r1)
	mov.w	r0, @r1
#endif /* DDB */
2:	__EXCEPTION_RETURN

	.align	5
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(EXPEVT)
REG_SYMBOL(BBRA)
REG_SYMBOL(TEA)
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (including no
 * kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
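/*
 * Roughly the C equivalent of the fast path below (a sketch based on
 * the inline comments and __pmap_pte_lookup()/__pmap_kpte_lookup();
 * every failure branch ends up in the full tlb_exception() call):
 *
 *	va = *SH3_PTEH & ~0x00000c00;	(4K-align VPN, keep ASID bits)
 *	if (va < 0) {			(kernel address)
 *		ptd = pmap_kernel()->pm_ptp;
 *		idx = va - VM_MIN_KERNEL_ADDRESS;
 *	} else {			(user address)
 *		ptd = curptd;
 *		idx = va;
 *	}
 *	ptp = ptd[__PMAP_PTP_INDEX(idx)];
 *	if (ptp == NULL || ((pte = ptp[__PMAP_PTP_OFSET(idx)]) & PG_V) == 0)
 *		goto call_tlb_exception;
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = va;			(ASID forced to 0 for kernel pages)
 *	ldtlb();
 */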
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (though we can
 * use kernel stack if need be, as its TLB entries are wired).  We can
 * only use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be relocatable
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
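/*
 * The lookup itself is the same as in the sh3 version above (see the
 * C sketch there).  The sh4 additions are that the PCMCIA space
 * attribute bits are extracted from the pte and written to SH4_PTEA,
 * i.e. roughly
 *
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 *
 * and that only one instruction of padding is needed between LDTLB
 * and RTE (two are needed on sh3).
 */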
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
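/*
 * Approximate C outline of the handler below (a sketch; the interrupt
 * stack switch and the PSL_BL juggling are only hinted at):
 *
 *	if (r14 > intfp)		(not yet on the interrupt stack)
 *		switch to the interrupt stack;
 *	uvmexp.intrs++;
 *	intc_intr(ssr, spc, ssp);
 *	if (we switched stacks above) {
 *		switch back to the interrupted lwp's stack;
 *		if ((tf->tf_ssr & PSL_MD) == 0) {	(returning to user)
 *			ast(curlwp, tf);
 *			return;
 *		}
 *	}
 *	(returning to kernel: roll back an interrupted _lock_cas() RAS)
 *	if (tf->tf_spc is strictly between _lock_cas_ras_start and
 *	    _lock_cas_ras_end)
 *		tf->tf_spc = _lock_cas_ras_start;
 */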
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	mov.l	.Li_intfp, r8
	mov.l	@r8, r8
	cmp/hi	r8, r14		! r14 > intfp ?
	bf/s	.Li_on_intstack	! if (r14 <= intfp) already on intstack
	 movt	r10		! remember to avoid re-testing later

	!! else - switch to the interrupt stack
	mov.l	.Li_intsp, r9
	mov.l	@r9, r9

	mov.l	r15, @-r9
	stc.l	r6_bank, @-r9
	mov	r9, r15
	ldc	r8, r6_bank	! intfp
.Li_on_intstack:

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_uvmexp_intrs, r2
	mov.l	.Li_intc_intr, r0
	mov	#0, r1
	mov.l	@r2, r3
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	add	#1, r3			! ++uvmexp.intrs
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r3, @r2

	!! r10 reflects if we have switched to the interrupt stack on
	!! entry.  If not - we are in a nested interrupt, so stay on
	!! the interrupt stack, and we know we are returning to kernel
	tst	r10, r10
	bt	.Li_return_to_kernel

	!! Switch to the lwp's stack to run the AST check, so that if
	!! interrupted lwp is preempted, it is later resumed on its
	!! own stack, not on the interrupt stack
	mov	#0x10, r10
	stc	sr, r11
	swap.b	r10, r10
	mov	r11, r2
	swap.w	r10, r10	! r10 = PSL_BL
	or	r10, r2
	ldc	r2, sr		! block exceptions

	ldc.l	@r15+, r6_bank
	mov.l	@r15+, r15

	ldc	r11, sr		! unblock exceptions

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_intfp:		.long	_C_LABEL(intfp)
.Li_intsp:		.long	_C_LABEL(intsp)
.Li_uvmexp_intrs:	.long	_C_LABEL(uvmexp) + UVMEXP_INTRS
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)
552