/*	$NetBSD: exception_vector.S,v 1.49 2011/02/04 04:14:25 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_ptrace.h"

#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.49 2011/02/04 04:14:25 uwe Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
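/*
 * Illustrative sketch only (not part of this file): the routines below
 * are copied into place at CPU setup time, sized by their *_end markers,
 * roughly as in this hypothetical C fragment:
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *	memcpy((void *)(vbr + 0x400), sh_vector_tlbmiss,
 *	    sh_vector_tlbmiss_end - sh_vector_tlbmiss);
 *	memcpy((void *)(vbr + 0x600), sh_vector_interrupt,
 *	    sh_vector_interrupt_end - sh_vector_interrupt);
 *
 * which is why each vector must be position independent and must fit
 * in the gap before the next vector address.
 */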

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
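/*
 * Dispatch sketch (illustrative only, not compiled): EXPEVT codes up to
 * EXPEVT_TLB_PROT_ST are TLB related, anything above goes to the general
 * exception handler, with an AST check on the way back to user mode.
 *
 *	if (EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, TEA);
 *	else {
 *		general_exception(curlwp, tf, TEA);
 *		ast(curlwp, tf);
 *	}
 */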
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	.Lg_return_from_exception
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#if defined(PTRACE) || defined(DDB)
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
	mov.w	r2, @r1			/* disable UBC channel A */
#endif
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5

#if defined(PTRACE) || defined(DDB)
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do single step.  Channel A in
	!! UBC is already rigged, we just need to enable it.
	MOV	(BBRA, r3)
	MOV	(BARA, r5)
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_SPC, r14), r4
	mov.l	r4, @r5			! BARA = tf->tf_spc
	mov.w	r2, @r3			! BBRA = tf->tf_ubc
#endif /* PTRACE || DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
REG_SYMBOL(EXPEVT)
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(TEA)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
REG_SYMBOL(BBRA)
REG_SYMBOL(BARA)

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (including no
 * kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
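/*
 * The fast path below is, roughly, the following C (illustrative only,
 * never compiled; names follow the comments in the code, e.g.
 * __PMAP_PTP_INDEX and __PMAP_PTP_OFSET from the sh3 pmap):
 *
 *	pteh = *SH3_PTEH & ~0x00000c00;	// { VPN, ASID }, 4K-aligned VPN
 *	if ((int)pteh >= 0) {		// MSB clear => user address
 *		ptd = curptd;		// curlwp's pmap page directory
 *		vpn = pteh;
 *	} else {
 *		ptd = __pmap_kernel.pm_ptp;
 *		vpn = pteh - VM_MIN_KERNEL_ADDRESS;
 *	}
 *	ptp = ptd[__PMAP_PTP_INDEX(vpn)];	// (vpn >> 22) & 0x1ff
 *	if (ptp == NULL)
 *		goto call_tlb_exception;
 *	pte = ptp[__PMAP_PTP_OFSET(vpn)];	// (vpn >> PGSHIFT) & 0x3ff
 *	if ((pte & PG_V) == 0)
 *		goto call_tlb_exception;
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = pteh;		// ASID forced to 0 for kernel pages
 *	ldtlb();
 */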
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (though we can
 * use kernel stack if need be, as its TLB entries are wired).  We can
 * only use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
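/*
 * The SH4 fast path is the same two-level lookup as the SH3 version
 * above; the extra step is copying the PCMCIA space attribute bits
 * from the pte into PTEA.  Roughly (illustrative only, not compiled):
 *
 *	*SH4_PTEL = pte & PG_HW_BITS;
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 *	*SH4_PTEH = pteh;		// ASID forced to 0 for kernel pages
 *	ldtlb();
 */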
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
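/*
 * Control flow sketch (illustrative only, not compiled; it mirrors the
 * ci_idepth bookkeeping and the return-path checks in the code below):
 *
 *	curcpu()->ci_idepth++;
 *	intc_intr(ssr, spc, ssp);
 *	curcpu()->ci_idepth--;
 *	if (nested interrupt || (tf->tf_ssr & PSL_MD) != 0)
 *		check for an interrupted _lock_cas RAS and restart it;
 *	else
 *		ast(curlwp, tf);	// returning to user mode
 *	return from exception;
 */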
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	!! arguments for intc_intr(): for struct clockframe
	stc	ssr, r4
	stc	spc, r5
	stc	r0_bank, r6		! ssp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r9, @r8

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
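	!! Roughly (illustrative only):
	!!	if (_lock_cas_ras_start < tf->tf_spc &&
	!!	    tf->tf_spc < _lock_cas_ras_end)
	!!		tf->tf_spc = _lock_cas_ras_start;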
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)