/*	$NetBSD: exception_vector.S,v 1.39 2008/06/06 04:16:22 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that the .align 5 (i.e. 32B cache line) before each data block does
 * the right thing w.r.t. its final destination after the vectors are
 * copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.39 2008/06/06 04:16:22 uwe Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
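
/*
 * Informative sketch only (not the actual copy code): the machine
 * dependent startup code copies each routine to its slot relative to
 * VBR, using the *_end markers defined below to compute the size,
 * roughly:
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *	memcpy((void *)(vbr + 0x400), sh3_vector_tlbmiss,	// or sh4_...
 *	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
 *	memcpy((void *)(vbr + 0x600), sh_vector_interrupt,
 *	    sh_vector_interrupt_end - sh_vector_interrupt);
 *
 * which is why each routine must be position independent and must fit
 * in its slot.
 */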

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and at most 768 bytes long (== 0x400 - 0x100).
 */
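
/*
 * Informative sketch only: in rough C terms the dispatch below does
 *
 *	if (EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, TEA);
 *	else {
 *		general_exception(curlwp, tf, TEA);
 *		ast(curlwp, tf);	// check for ASTs on exit to user
 *	}
 *
 * with the DDB single-step re-arm of the UBC handled on the way out
 * of the general exception path.
 */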
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	.Lg_return_from_exception
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#ifdef DDB
	MOV	(BBRA, r1)
	mov	#0, r2
	mov.w	r2, @r1			/* disable UBC */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5

#ifdef DDB
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! ddb single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do single step.  Channel A in
	!! UBC is already rigged, we just need to enable it.
	mov	#0, r0
	MOV	(BBRA, r3)
	mov.l	r0, @(TF_UBC, r14)
	__EXCEPTION_BLOCK(r0, r1)	! because we ignore insn address
	mov.w	r2, @r3			! now safe to set BBRA = tf->tf_ubc
#endif /* DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(EXPEVT)
REG_SYMBOL(BBRA)
REG_SYMBOL(TEA)
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in the curlwp or kernel pmap.  The fast path
 * runs with exceptions disabled, so no P3 addresses please (including
 * no kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we save the
 * context and call the tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and at most 512 bytes long (== 0x600 - 0x400).
 */
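
/*
 * Informative sketch only: the fast path below mirrors
 * __pmap_pte_lookup() / __pmap_kpte_lookup(), roughly:
 *
 *	vpn = *SH3_PTEH & ~0x00000c00;		// 4K-align the VPN
 *	if (user address)
 *		ptd = curptd;			// curlwp's pm_ptp
 *	else {
 *		ptd = pmap_kernel()->pm_ptp;
 *		vpn -= VM_MIN_KERNEL_ADDRESS;
 *	}
 *	ptp = ptd[(vpn >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1)];
 *	if (ptp == NULL)
 *		goto slow path;			// tlb_exception()
 *	pte = ptp[(vpn >> PGSHIFT) & (__PMAP_PTP_PG_N - 1)];
 *	if ((pte & PG_V) == 0)
 *		goto slow path;
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = { VPN, ASID };		// ASID forced to 0 for kernel
 *	ldtlb;
 */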
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set the ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in the curlwp or kernel pmap.  The fast path
 * runs with exceptions disabled, so no P3 addresses please (though we
 * can use the kernel stack if need be, as its TLB entries are wired).
 * We can only use BANK1 registers, and of those r6 and r7 are already
 * taken.
 *
 * If we don't find a valid mapping in the fast path, we save the
 * context and call the tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and at most 512 bytes long (== 0x600 - 0x400).
 */
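
/*
 * The page table walk is the same as in the SH3 version above.  The
 * SH4-specific differences in the fast path below are that the PCMCIA
 * space attribute bits of the pte are also written to SH4_PTEA, and
 * that ldtlb needs only one instruction of padding before the rte.
 */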
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set the ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
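
/*
 * Informative sketch only: in rough C terms the interrupt path below
 * does
 *
 *	curcpu()->ci_idepth++;
 *	uvmexp.intrs++;
 *	tf->tf_expevt = 0;			// for debug
 *	intc_intr(ssr, spc, ssp);
 *	nested = (curcpu()->ci_idepth-- > 0);
 *	if (!nested && (tf->tf_ssr & PSL_MD) == 0)
 *		ast(curlwp, tf);		// returning to user mode
 *	else
 *		check for an interrupted _lock_cas RAS (see below);
 */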
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_uvmexp_intrs, r2
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	mov.l	@r2, r3
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	add	#1, r3			! ++uvmexp.intrs
	mov.l	r9, @r8
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r3, @r2

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start
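	!! In C terms (informative sketch), the check above is:
	!!	if (_lock_cas_ras_start < tf->tf_spc &&
	!!	    tf->tf_spc < _lock_cas_ras_end)
	!!		tf->tf_spc = _lock_cas_ras_start;
	!! so the interrupted restartable atomic sequence is re-run from
	!! its start when we rte back into the kernel.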

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_uvmexp_intrs:	.long	_C_LABEL(uvmexp) + UVMEXP_INTRS
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)
