/*	$NetBSD: exception_vector.S,v 1.53 2021/07/26 21:43:11 andvar Exp $	*/

/*-
 * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_ptrace.h"

#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align the vectors more strictly here (where alignment doesn't
 * really matter) so that .align 5 (i.e. a 32-byte cache line) before
 * a data block does the right thing w.r.t. the final destinations
 * after the vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.53 2021/07/26 21:43:11 andvar Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):
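
/*
 * The paired start and end labels let the boot code copy each vector
 * with a plain block move.  A minimal sketch of that copy, assuming
 * memcpy-like semantics (the actual copy is done by the machine-
 * dependent startup code, not in this file):
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 */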


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and at most 768 bytes long (== 0x400 - 0x100).
 */
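
/*
 * In outline, the handler below is (an illustrative C sketch only;
 * the real work happens in the called C functions):
 *
 *	tf->tf_expevt = EXPEVT;
 *	if (EXPEVT > EXPEVT_TLB_PROT_ST)
 *		general_exception(curlwp, tf, TEA);
 *	else
 *		tlb_exception(curlwp, tf, TEA);
 *	ast(curlwp, tf);	// on the way out to user mode
 */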
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Is this a TLB exception or a general exception? */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt/s	1f
	 mov	r4, r8	/* preserve curlwp across call */

	/* tlb_exception(curlwp, tf, TEA); */
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.Lg_ast, r0
	mov	r8, r4
	jsr	@r0
	 mov	r14, r5
	bra	.Lg_return_from_exception
	 nop

1:	/* general_exception(curlwp, tf, TEA); */
#if defined(PTRACE_HOOKS) || defined(DDB)
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
	mov.w	r2, @r1			/* disable UBC channel A */
#endif
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.Lg_ast, r0
	mov	r8, r4
	jsr	@r0
	 mov	r14, r5

#if defined(PTRACE_HOOKS) || defined(DDB)
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do a single step.  Channel A in
	!! the UBC is already rigged, we just need to enable it.
	MOV	(BBRA, r3)
	MOV	(BARA, r5)
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_SPC, r14), r4
	mov.l	r4, @r5			! BARA = tf->tf_spc
	mov.w	r2, @r3			! BBRA = tf->tf_ubc
#endif /* PTRACE_HOOKS || DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
REG_SYMBOL(EXPEVT)
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(TEA)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
REG_SYMBOL(BBRA)
REG_SYMBOL(BARA)

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking
 * whether there is a valid mapping in the curlwp or kernel pmap.  The
 * fast path runs with exceptions disabled, so no P3 addresses please
 * (that includes the kernel stack, as we cannot wire TLB entries on
 * sh3).  We can only use BANK1 registers, and of those r6 and r7 are
 * already taken.
 *
 * If we don't find a valid mapping in the fast path, we do a full
 * context save and call the TLB exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and at most 512 bytes long (== 0x600 - 0x400).
 */
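
/*
 * A rough C sketch of the fast path below (illustrative only; compare
 * __pmap_pte_lookup() and __pmap_kpte_lookup() in the sh3 pmap):
 *
 *	vpn = *SH3_PTEH & ~0x00000c00;	// 4K-aligned { VPN, ASID }
 *	if (vpn >= 0)			// user space address
 *		ptd = curptd;
 *	else {				// kernel space address
 *		ptd = __pmap_kernel.pm_ptp;
 *		vpn -= VM_MIN_KERNEL_ADDRESS;
 *	}
 *	ptp = ptd[(vpn >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1)];
 *	if (ptp == NULL)
 *		goto call_tlb_exception;
 *	pte = ptp[(vpn >> PGSHIFT) & (__PMAP_PTP_PG_N - 1)];
 *	if ((pte & PG_V) == 0)
 *		goto call_tlb_exception;
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = vpn;		// ASID forced to 0 for kernel
 *	ldtlb();
 */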
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of the fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.L3_tlb_exception, r0
	mov	r4, r8			! save curlwp across the call
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.L3_ast, r0
	mov	r8, r4			! arg1: curlwp
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)
.L3_ast:			.long	_C_LABEL(ast)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking
 * whether there is a valid mapping in the curlwp or kernel pmap.  The
 * fast path runs with exceptions disabled, so no P3 addresses please
 * (though we can use the kernel stack if need be, as its TLB entries
 * are wired).  We can only use BANK1 registers, and of those r6 and
 * r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do a full
 * context save and call the TLB exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and at most 512 bytes long (== 0x600 - 0x400).
 */
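
/*
 * The lookup mirrors the sh3 version above; the sh4-only extra step
 * is loading PTEA with the PCMCIA space attribute bits taken from
 * the PTE.  A sketch of that step (the shlr8 + shlr pair below is a
 * 9-bit right shift, i.e. _PG_PCMCIA_SHIFT is assumed to be 9):
 *
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 */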
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of the fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.L4_tlb_exception, r0
	mov	r4, r8			! save curlwp across the call
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.L4_ast, r0
	mov	r8, r4			! arg1: curlwp
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)
.L4_ast:			.long	_C_LABEL(ast)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
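
/*
 * In outline (illustrative C sketch; the cmp/pl test below implies
 * that ci_idepth is -1 while no interrupt is being handled):
 *
 *	curcpu()->ci_idepth++;
 *	intc_intr(ssr, spc, ssp);	// args double as struct clockframe
 *	curcpu()->ci_idepth--;
 *	if (nested interrupt || (tf->tf_ssr & PSL_MD))
 *		check for an interrupted _lock_cas RAS;	// back to kernel
 *	else
 *		ast(curlwp, tf);	// back to user: check for ASTs
 */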
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	!! arguments for intc_intr(); they double as a struct clockframe
	stc	ssr, r4
	stc	spc, r5
	stc	r0_bank, r6		! ssp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r9, @r8

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

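	!! The check below implements restartable atomic sequences: if
	!! we interrupted the _lock_cas RAS window, rewind SPC to the
	!! start of the window so the sequence is restarted from
	!! scratch.  Roughly:
	!!	if (ras_start < tf->tf_spc && tf->tf_spc < ras_end)
	!!		tf->tf_spc = ras_start;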
.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)