/*	$NetBSD: exception_vector.S,v 1.32 2007/09/25 00:53:04 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.32 2007/09/25 00:53:04 uwe Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
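
/*
 * In C, the copy done at startup amounts to something like this
 * sketch (the actual copier lives in the MD CPU setup code and is
 * not shown here):
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *
 * which is why each handler below must be position independent and
 * must fit within the space before the next vector address.
 */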

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
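
/*
 * In rough C, the dispatch below amounts to (a sketch, not the
 * actual implementation):
 *
 *	tf->tf_expevt = EXPEVT;
 *	if (EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, TEA);
 *	else {
 *		general_exception(curlwp, tf, TEA);
 *		ast(curlwp, tf);
 *	}
 */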
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Is this a TLB exception (EXPEVT <= EXPEVT_TLB_PROT_ST)? */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	2f
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
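	/*
	 * DDB drives single-stepping with the on-chip UBC (user break
	 * controller): disable it and clear tf->tf_ubc here; on the
	 * way out (below) BBRA is reloaded from tf->tf_ubc, which ddb
	 * may have set to request a single-step.
	 */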
#ifdef DDB
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.w	r2, @r1			/* disable UBC */
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5
#ifdef DDB	/* BBRA = tf->tf_ubc */
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_UBC, r14), r0
	MOV	(BBRA, r1)
	mov.w	r0, @r1
#endif /* DDB */
2:	__EXCEPTION_RETURN

	.align	5
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(EXPEVT)
REG_SYMBOL(BBRA)
REG_SYMBOL(TEA)
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We run the fast
 * path with exceptions disabled, so no P3 addresses please (including
 * no kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we save the
 * context and call the TLB exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
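
/*
 * In rough C, the fast path below is (a sketch; cf. the real
 * __pmap_pte_lookup() and __pmap_kpte_lookup()):
 *
 *	vpn = *SH3_PTEH & ~0x00000c00;	! { VPN, ASID }, VPN 4K aligned
 *	if (kernel va)
 *		vpn -= VM_MIN_KERNEL_ADDRESS, ptd = pmap_kernel()'s ptd;
 *	else
 *		ptd = curptd;
 *	ptp = ptd[__PMAP_PTP_INDEX(vpn)];
 *	if (ptp == NULL || ((pte = ptp[__PMAP_PTP_OFSET(vpn)]) & PG_V) == 0)
 *		tlb_exception(curlwp, tf, *SH3_TEA);
 *	else {
 *		*SH3_PTEL = pte & PG_HW_BITS;
 *		*SH3_PTEH = vpn;	! with ASID, or ASID = 0 for kernel
 *		ldtlb;
 *	}
 */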
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of the fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We run the fast
 * path with exceptions disabled, so no P3 addresses please (though we
 * can use the kernel stack if need be, as its TLB entries are wired).
 * We can only use BANK1 registers, and of those r6 and r7 are already
 * taken.
 *
 * If we don't find a valid mapping in the fast path, we save the
 * context and call the TLB exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
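
/*
 * The lookup is the same as in the sh3 version above; the sh4
 * differences are the extra PTEA write with the PCMCIA space
 * attribute bits and the shorter post-ldtlb padding.
 */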
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of the fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs
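	/*
	 * In rough C, the two stores above are (a sketch; the shlr8 +
	 * shlr pair implies _PG_PCMCIA_SHIFT == 9 here):
	 *
	 *	*SH4_PTEL = pte & PG_HW_BITS;
	 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
	 */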

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
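
/*
 * In rough C, the handler below amounts to (a sketch, not the
 * actual implementation):
 *
 *	tf->tf_expevt = 0;
 *	uvmexp.intrs++;
 *	intc_intr(ssr, spc, ssp);	! ssp taken from r0_bank
 *	if ((tf->tf_ssr & PSL_MD) == 0)
 *		ast(curlwp, tf);	! returning to user mode
 *	else
 *		check for an interrupted kernel RAS (below)
 */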
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_uvmexp_intrs, r2
	mov.l	.Li_intc_intr, r0
	mov	#0, r1
	mov.l	@r2, r3
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)
	add	#1, r3			! ++uvmexp.intrs
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r3, @r2

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

	!! Check for interrupted kernel RAS when returning to kernel
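	!! In rough C (a sketch): if the interrupt hit inside the
	!! _lock_cas restartable atomic sequence, arrange for it to be
	!! restarted from the beginning:
	!!	if (ras_start < tf->tf_spc && tf->tf_spc < ras_end)
	!!		tf->tf_spc = ras_start;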
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_uvmexp_intrs:	.long	_C_LABEL(uvmexp) + UVMEXP_INTRS
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)
510