/*	$NetBSD: exception_vector.S,v 1.38 2008/04/28 20:23:35 martin Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.38 2008/04/28 20:23:35 martin Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */
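
/*
 * Roughly speaking (a sketch only, not the actual MD startup code),
 * the *_end markers emitted below let that copy be a plain memcpy:
 *
 *	memcpy((void *)(vbr + 0x100), sh_vector_generic,
 *	    sh_vector_generic_end - sh_vector_generic);
 *
 * which is why each handler must be position independent and must
 * fit in the space before the next vector.
 */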

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):


/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
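/*
 * In rough C terms (a sketch of the flow below, not authoritative):
 *
 *	tf->tf_expevt = EXPEVT;
 *	if (EXPEVT <= EXPEVT_TLB_PROT_ST)
 *		tlb_exception(curlwp, tf, TEA);
 *	else {
 *		general_exception(curlwp, tf, TEA);
 *		ast(curlwp, tf);	// check for ASTs on exit to user
 *	}
 */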
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	__INTR_MASK(r0, r1)
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt	1f

	/* tlb_exception(curlwp, tf, TEA); */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */
	bra	2f
	 nop

	/* general_exception(curlwp, tf, TEA); */
1:	mov	r4, r8
#ifdef DDB
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.w	r2, @r1			/* disable UBC */
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
#endif /* DDB */
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	mov	r8, r4
	mov.l	.Lg_ast, r0
	jsr	@r0
	 mov	r14, r5
#ifdef DDB	/* BBRA = tf->tf_ubc */
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_UBC, r14), r0
	MOV	(BBRA, r1)
	mov.w	r0, @r1
#endif /* DDB */
2:	__EXCEPTION_RETURN

	.align	5
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(EXPEVT)
REG_SYMBOL(BBRA)
REG_SYMBOL(TEA)
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (including no
 * kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
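/*
 * The fast path below is approximately this C (cf. __pmap_pte_lookup
 * and __pmap_kpte_lookup; a sketch only, with the constants loaded
 * from the literal pool at the end of the routine):
 *
 *	va = *SH3_PTEH & ~0x00000c00;	// { VPN, ASID }, 4K-aligned VPN
 *	idx = va;
 *	if (va >= 0)			// user address
 *		ptd = curptd;
 *	else {				// kernel address
 *		ptd = __pmap_kernel.pm_ptp;
 *		idx -= VM_MIN_KERNEL_ADDRESS;
 *	}
 *	ptp = ptd[(idx >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1)];
 *	if (ptp == NULL)
 *		goto slow;
 *	pte = ptp[(idx >> PGSHIFT) & (__PMAP_PTP_PG_N - 1)];
 *	if ((pte & PG_V) == 0)
 *		goto slow;
 *	*SH3_PTEL = pte & PG_HW_BITS;
 *	*SH3_PTEH = va;			// ASID forced to 0 for kernel va
 *	ldtlb();
 *	return;
 * slow:
 *	tlb_exception(curlwp, tf, *SH3_TEA);
 */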
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L3_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (though we can
 * use kernel stack if need be, as its TLB entries are wired).  We can
 * only use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
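/*
 * The table walk is identical to the sh3 version above.  The
 * sh4-specific part (a sketch; the shift count is inferred from the
 * shlr8/shlr pair below) is that the PCMCIA space attribute bits are
 * peeled off the pte and loaded into PTEA before ldtlb:
 *
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 *	*SH4_PTEL = pte & PG_HW_BITS;
 */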
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)
	mov.l	.L4_tlb_exception, r0
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
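/*
 * In rough C (a sketch; ci_idepth lives in a callee-saved register
 * across the call, and the cmp/pl nesting test below implies it
 * counts from -1 at base level):
 *
 *	curcpu()->ci_idepth++;
 *	uvmexp.intrs++;
 *	intc_intr(ssr, spc, ssp);
 *	curcpu()->ci_idepth--;
 *	if (!nested && (tf->tf_ssr & PSL_MD) == 0)
 *		ast(curlwp, tf);		// returning to user mode
 *	else if (_lock_cas_ras_start < tf->tf_spc &&
 *	    tf->tf_spc < _lock_cas_ras_end)
 *		tf->tf_spc = _lock_cas_ras_start; // restart interrupted RAS
 */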
NENTRY(sh_vector_interrupt)
	__EXCEPTION_ENTRY
	stc	r0_bank, r6		! ssp - 3rd arg to intc_intr()

	__INTR_MASK(r0, r1)
	__EXCEPTION_UNBLOCK(r0, r1)	! enable exceptions for P3 access

	mov.l	.Li_ci_idepth, r8	! callee-saved
	mov.l	.Li_uvmexp_intrs, r2
	mov.l	.Li_intc_intr, r0
	mov.l	@r8, r9			! callee-saved
	mov	#0, r1
	mov.l	@r2, r3
	add	#1, r9			! curcpu()->ci_idepth++
	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
	add	#1, r3			! ++uvmexp.intrs
	mov.l	r9, @r8
	jsr	@r0			! intc_intr(ssr, spc, ssp)
	 mov.l	r3, @r2

	cmp/pl	r9			! curcpu()->ci_idepth > 0
	add	#-1, r9			! curcpu()->ci_idepth--
	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
	 mov.l	r9, @r8

	mov.l	@(TF_SSR, r14), r2
	mov.l	.Li_PSL_MD, r1
	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
	bt	.Li_return_to_user

.Li_return_to_kernel:
	!! Check for interrupted kernel RAS when returning to kernel
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_uvmexp_intrs:	.long	_C_LABEL(uvmexp) + UVMEXP_INTRS
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)