/*	$NetBSD: exception_vector.S,v 1.39 2008/06/06 04:16:22 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that the .align 5 (i.e. a 32B cache line) before the data block does
 * the right thing w.r.t. the final destinations after the vectors are
 * copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.39 2008/06/06 04:16:22 uwe Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
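
/*
 * Illustrative sketch only (the copy itself is done elsewhere, in the
 * machine-dependent startup code, not in this file): each routine is
 * copied to its slot after VBR using the *_end markers defined below,
 * roughly like
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *	memcpy((void *)(vbr + 0x400), sh3_vector_tlbmiss,   (or sh4_...)
 *	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
 *	memcpy((void *)(vbr + 0x600), sh_vector_interrupt,
 *	    sh_vector_interrupt_end - sh_vector_interrupt);
 *
 * where "vbr" stands for whatever base address the MD code loads
 * into the VBR register.
 */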

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
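
/*
 * For reference, the dispatch below corresponds roughly to this C
 * (a sketch, not generated code; "tf" is the trapframe built by
 * __EXCEPTION_ENTRY and EXPEVT/TEA stand for the control registers
 * read through the MOV() macro):
 *
 *	tf->tf_expevt = *EXPEVT;
 *	if (*EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, *TEA);
 *	else {
 *		general_exception(curlwp, tf, *TEA);
 *		ast(curlwp, tf);	(check for ASTs on the way out)
 *	}
 */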
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	.Lg_return_from_exception
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#ifdef DDB
	MOV	(BBRA, r1)
	mov	#0, r2
	mov.w	r2, @r1			/* disable UBC */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5

#ifdef DDB
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! ddb single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do a single step.  Channel A in
	!! the UBC is already rigged; we just need to enable it.
	mov	#0, r0
	MOV	(BBRA, r3)
	mov.l	r0, @(TF_UBC, r14)
	__EXCEPTION_BLOCK(r0, r1)	! because we ignore insn address
	mov.w	r2, @r3			! now safe to set BBRA = tf->tf_ubc
#endif /* DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(EXPEVT)
REG_SYMBOL(BBRA)
REG_SYMBOL(TEA)
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking
 * whether there's a valid mapping in the curlwp or kernel pmap.  The
 * fast path runs with exceptions disabled, so no P3 addresses please
 * (including no kernel stack, as we cannot wire TLB entries on sh3).
 * We can only use BANK1 registers, and of those r6 and r7 are already
 * taken.
 *
 * If we don't find a valid mapping in the fast path, we do a full
 * context save and call the tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
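
/*
 * For reference, a simplified C sketch of the fast path below (the
 * assembly keeps the { VPN, ASID } word in SH3_PTEH format throughout
 * and only masks/shifts it for indexing; __PMAP_PTP_INDEX and
 * __PMAP_PTP_OFSET are the pmap macros named in the comments below):
 *
 *	va  = *SH3_PTEH & ~0x00000c00;		(4K-aligned VPN + ASID)
 *	ptd = (va >= 0) ? curptd : __pmap_kernel.pm_ptp;
 *	idx = (va >= 0) ? va : va - VM_MIN_KERNEL_ADDRESS;
 *	ptp = ptd[__PMAP_PTP_INDEX(idx)];	(NULL ptp -> slow path)
 *	pte = ptp[__PMAP_PTP_OFSET(idx)];	(no PG_V  -> slow path)
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = va;				(ASID forced to 0 for kernel)
 *	ldtlb;
 */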
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking
 * whether there's a valid mapping in the curlwp or kernel pmap.  The
 * fast path runs with exceptions disabled, so no P3 addresses please
 * (though we can use the kernel stack if need be, as its TLB entries
 * are wired).  We can only use BANK1 registers, and of those r6 and r7
 * are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do a full
 * context save and call the tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
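
/*
 * The fast path below mirrors the sh3 version above; the SH4-specific
 * extra step is loading the PCMCIA space attribute bits into PTEA.
 * A simplified sketch of the register loads (the shift follows from
 * the "shlr8; shlr" sequence below, i.e. pte >> _PG_PCMCIA_SHIFT):
 *
 *	*SH4_PTEL = pte & PG_HW_BITS;
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 *	*SH4_PTEH = va;			(ASID forced to 0 for kernel)
 *	ldtlb;
 */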
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
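
/*
 * For reference, a C-level sketch of the flow below (illustration
 * only; "ssp" is the saved r0_bank, and ssr/spc are handed to
 * intc_intr() as set up by __EXCEPTION_ENTRY):
 *
 *	idepth = ++curcpu()->ci_idepth;
 *	uvmexp.intrs++;
 *	tf->tf_expevt = 0;
 *	intc_intr(ssr, spc, ssp);
 *	curcpu()->ci_idepth--;
 *	if (idepth <= 0 && (tf->tf_ssr & PSL_MD) == 0)
 *		ast(curlwp, tf);		(returning to user mode)
 *	else if (tf->tf_spc > _lock_cas_ras_start &&
 *	    tf->tf_spc < _lock_cas_ras_end)
 *		tf->tf_spc = _lock_cas_ras_start;  (restart interrupted RAS)
 */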
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_uvmexp_intrs, r2
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	mov.l	@r2, r3
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	add	#1, r3			! ++uvmexp.intrs
	mov.l	r9, @r8
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r3, @r2

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_uvmexp_intrs:	.long	_C_LABEL(uvmexp) + UVMEXP_INTRS
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)
523