/*	$NetBSD: exception_vector.S,v 1.46 2010/12/20 00:25:43 matt Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.46 2010/12/20 00:25:43 matt Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
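
/*
 * Illustrative sketch only (not part of this file): at CPU startup the
 * MD setup code copies each routine below into its slot relative to
 * VBR, using the *_end markers to size the copy.  The helper name here
 * is made up for the example:
 *
 *	extern char sh_vector_generic[], sh_vector_generic_end[];
 *	extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
 *
 *	void
 *	example_copy_vectors(char *vbr)
 *	{
 *		memcpy(vbr + 0x100, sh_vector_generic,
 *		    sh_vector_generic_end - sh_vector_generic);
 *		// likewise the sh3_/sh4_vector_tlbmiss variant at +0x400
 *		memcpy(vbr + 0x600, sh_vector_interrupt,
 *		    sh_vector_interrupt_end - sh_vector_interrupt);
 *	}
 */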

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f
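	!! The branch above dispatches on the EXPEVT value; in C terms
	!! (illustrative only):
	!!	if (expevt <= EXPEVT_TLB_PROT_ST)
	!!		tlb_exception(curlwp, tf, TEA);
	!!	else
	!!		general_exception(curlwp, tf, TEA);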

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	.Lg_return_from_exception
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#ifdef DDB
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.w	r2, @r1			/* disable UBC */
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5

#ifdef DDB
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! ddb single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do single step.  Channel A in
	!! UBC is already rigged, we just need to enable it.
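	!! In C terms (illustrative only):
	!!	if (tf->tf_ubc != 0) {
	!!		BARA = tf->tf_spc;
	!!		BBRA = tf->tf_ubc;	/* re-arm channel A */
	!!	}
	!! with exceptions blocked around the two register writes.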
	MOV	(BBRA, r3)
	MOV	(BARA, r5)
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_SPC, r14), r4
	mov.l	r4, @r5			! BARA = tf->tf_spc
	mov.w	r2, @r3			! BBRA = tf->tf_ubc
#endif /* DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
REG_SYMBOL(EXPEVT)
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(TEA)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
REG_SYMBOL(BBRA)
REG_SYMBOL(BARA)

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (including no
 * kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
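
/*
 * For reference, the fast path below is a hand-coded version of the C
 * page table walk referenced in the comments (__pmap_kpte_lookup and
 * __pmap_pte_lookup).  Illustrative sketch only; the masks mirror the
 * constants used by the assembly below:
 *
 *	pt_entry_t **ptd, *ptp, pte;
 *
 *	if (kernel_va) {
 *		vpn -= VM_MIN_KERNEL_ADDRESS;
 *		ptd = pmap_kernel()->pm_ptp;
 *	} else
 *		ptd = curptd;
 *	ptp = ptd[(vpn >> 22) & 0x1ff];		// __PMAP_PTP_INDEX(vpn)
 *	if (ptp == NULL)
 *		goto call_tlb_exception;	// slow path
 *	pte = ptp[(vpn >> PGSHIFT) & 0x3ff];	// __PMAP_PTP_OFSET(vpn)
 *	if ((pte & PG_V) == 0)
 *		goto call_tlb_exception;	// slow path
 *	// otherwise enter { VPN, ASID } / (pte & PG_HW_BITS) into the
 *	// TLB via PTEH/PTEL and ldtlb.
 */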
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT
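	!! (with DEBUG the mov above is the delay slot of the bt/s;
	!!  without DEBUG it simply executes in line)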

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (though we can
 * use kernel stack if need be, as its TLB entries are wired).  We can
 * only use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be relocatable
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
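
/*
 * The page table walk below is the same as in the sh3 vector above;
 * the sh4-specific extra step is deriving the PCMCIA space attribute
 * bits for PTEA.  In C terms (illustrative only):
 *
 *	*SH4_PTEL = pte & PG_HW_BITS;
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 */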
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
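
/*
 * Rough outline of the code below in C terms (illustrative only):
 *
 *	curcpu()->ci_idepth++;
 *	intc_intr(ssr, spc, ssp);
 *	curcpu()->ci_idepth--;
 *	if (!nested && (tf->tf_ssr & PSL_MD) == 0)
 *		ast(curlwp, tf);	// returning to user mode
 *	else
 *		check for an interrupted _lock_cas RAS (see below);
 *	return from exception;
 */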
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r9, @r8

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
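	!! In C terms (illustrative only):
	!!	if (_lock_cas_ras_start < tf->tf_spc &&
	!!	    tf->tf_spc < _lock_cas_ras_end)
	!!		tf->tf_spc = _lock_cas_ras_start;   /* restart it */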
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)