/*	$NetBSD: exception_vector.S,v 1.46 2010/12/20 00:25:43 matt Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.46 2010/12/20 00:25:43 matt Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):

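/*
 * The *_end markers and SET_ENTRY_SIZE() let the MD startup code
 * compute each vector's size and copy it to its offset from VBR.  A
 * minimal sketch of that copy (illustrative only; the actual code
 * lives in the sh3 MD initialization):
 *
 *	extern char sh_vector_generic[], sh_vector_generic_end[];
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *	// likewise tlbmiss at +0x400 and interrupt at +0x600,
 *	// followed by instruction cache synchronization
 */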

/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
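/*
 * Roughly, in C (a sketch that glosses over the register save and
 * interrupt masking done by the __EXCEPTION_*/__INTR_* macros;
 * _reg_read_4()/SH_() stand for memory-mapped register access):
 *
 *	tf->tf_expevt = _reg_read_4(SH_(EXPEVT));
 *	if (tf->tf_expevt <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, _reg_read_4(SH_(TEA)));
 *	else {
 *		general_exception(curlwp, tf, _reg_read_4(SH_(TEA)));
 *		ast(curlwp, tf);	// ASTs on return to user mode
 *	}
 */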
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0			/* EXPEVT > EXPEVT_TLB_PROT_ST? */
	bt	1f			/* yes, take the general path */

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	.Lg_return_from_exception
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8			/* save curlwp for the ast() call */
#ifdef DDB
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.w	r2, @r1			/* disable UBC */
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4			/* 1st arg: curlwp */
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

#ifdef DDB
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! ddb single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do a single step.  Channel A in
	!! the UBC is already rigged, we just need to enable it.
	MOV	(BBRA, r3)
	MOV	(BARA, r5)
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_SPC, r14), r4
	mov.l	r4, @r5			! BARA = tf->tf_spc
	mov.w	r2, @r3			! BBRA = tf->tf_ubc
#endif /* DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
REG_SYMBOL(EXPEVT)
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(TEA)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
REG_SYMBOL(BBRA)
REG_SYMBOL(BARA)

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)
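
/*
 * Note that each vector keeps its literal pool between the entry
 * point and the end marker, so the constants are copied along with
 * the code.  This is also forced by the hardware: mov.l pc-relative
 * loads have an 8-bit displacement (scaled by 4), so a pool can sit
 * at most about 1KB after the instruction that references it.
 */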


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp's or the kernel pmap.  The fast
 * path runs with exceptions disabled, so no P3 addresses please
 * (including no kernel stack, as we cannot wire TLB entries on sh3).
 * We can only use BANK1 registers, and of those r6 and r7 are
 * already taken.
 *
 * If we don't find a valid mapping in the fast path, we do a full
 * context save and call the tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
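/*
 * The fast path below is the assembly twin of the C pte lookup in
 * the sh3 pmap (cf. __pmap_pte_lookup / __pmap_kpte_lookup).  A
 * sketch in C, using the same constants this code keeps in its
 * literal pool (the function name is illustrative only):
 *
 *	pt_entry_t
 *	pte_lookup_sketch(vaddr_t va, pt_entry_t **ptd)
 *	{
 *		// ptd: 512 page table pages, each mapping 4MB (1 << 22)
 *		pt_entry_t *ptp = ptd[(va >> 22) & 0x1ff];
 *		if (ptp == NULL)
 *			return 0;
 *		// each ptp holds 1024 ptes, each mapping a 4KB page
 *		return ptp[(va >> PGSHIFT) & 0x3ff];
 *	}
 *
 * For kernel addresses, VM_MIN_KERNEL_ADDRESS is subtracted from va
 * first, and pmap_kernel()'s page table directory is used instead
 * of curptd.
 */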
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop
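
	!! Note on the ASID dance above: ldtlb latches the current PTEH
	!! contents (both VPN and ASID) into the selected TLB entry, so
	!! kernel mappings are always entered under ASID 0; PTEH is then
	!! restored so that later lookups use the interrupted context's
	!! ASID again.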


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp's or the kernel pmap.  The fast
 * path runs with exceptions disabled, so no P3 addresses please
 * (though we can use the kernel stack if need be, as its TLB entries
 * are wired).  We can only use BANK1 registers, and of those r6 and
 * r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do a full
 * context save and call the tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
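/*
 * The page table walk matches the sh3 version above.  The SH4 twist
 * is the PTEA register, which receives the PCMCIA space attribute
 * (SA) bits kept in the high part of the pte.  In C the extra step
 * is roughly (a sketch; the shlr8 + shlr pair below implements the
 * shift by _PG_PCMCIA_SHIFT, i.e. 9):
 *
 *	uint32_t ptel = pte & PG_HW_BITS;
 *	uint32_t ptea = (pte >> 9) & SH4_PTEA_SA_MASK;	// SA bits
 */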
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
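/*
 * In rough C, glossing over the register bank details (a sketch
 * only; check_ras() is a hypothetical name for the inline RAS test
 * further down):
 *
 *	curcpu()->ci_idepth++;		// apparently -1 at base level,
 *					// or the cmp/pl test below
 *					// would never fall through
 *	tf->tf_expevt = 0;		// for debugging
 *	intc_intr(ssr, spc, ssp);
 *	if (curcpu()->ci_idepth-- > 0	// nested interrupt?
 *	    || (tf->tf_ssr & PSL_MD))	// ... or interrupted the kernel?
 *		check_ras(tf);
 *	else
 *		ast(curlwp, tf);	// ASTs on return to user mode
 */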
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r9, @r8			! store the incremented ci_idepth

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8			! store the decremented ci_idepth

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start
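
	!! In C, the check above is (sketch):
	!!	if (spc > _lock_cas_ras_start && spc < _lock_cas_ras_end)
	!!		tf->tf_spc = _lock_cas_ras_start;
	!! i.e. if we interrupted the _lock_cas restartable atomic
	!! sequence midway, rewind it to its start so that it reruns
	!! atomically after the interrupt returns.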

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)