/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputypes.h"

#include <machine/asm.h>
#include "assym.h"

RCSID("$NetBSD: pmap_vfp.S,v 1.5 2012/12/26 18:35:47 matt Exp $")

/*
 * This zeroes a page 64 bytes at a time.  64 was chosen over 32 since
 * 64 is the cache line size of the Cortex-A8.
 */
/* LINTSTUB: void bzero_page_vfp(vaddr_t); */
ENTRY(bzero_page_vfp)
#if 0
	str	lr, [sp, #-8]!
	bl	_C_LABEL(vfp_kernel_acquire)
#else
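	/*
	 * Inline VFP acquire: read FPEXC and, if the VFP is currently
	 * disabled, write it back with the enable bit set.  The original
	 * FPEXC value stays in r3 so it can be restored on return, and
	 * d0-d7 are saved since they may belong to another context.
	 */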
	mrc	p10, 7, r3, c8, c0, 0
	tst	r3, #VFP_FPEXC_EN
	orreq	r2, r3, #VFP_FPEXC_EN
	mcreq	p10, 7, r2, c8, c0, 0
	vpush	{d0-d7}
#endif
#if (CPU_CORTEX == 0)
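	/* No NEON: zero d0 one word at a time, then copy it into d1-d7. */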
	mov	ip, #0
	vmov	s0, ip
	vmov	s1, ip
	vmov.f64 d1, d0
	vmov.f64 d2, d0
	vmov.f64 d3, d0
	vmov.f64 d4, d0
	vmov.f64 d5, d0
	vmov.f64 d6, d0
	vmov.f64 d7, d0
#else
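	/* Cortex: use NEON to clear q0-q3 (aliases of d0-d7) in four ops. */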
	veor	q0, q0, q0
	veor	q1, q1, q1
	veor	q2, q2, q2
	veor	q3, q3, q3
#endif
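	/*
	 * Each vstmia stores 8 doublewords (64 bytes), so one pass of the
	 * loop zeroes 256 bytes; keep going until r0 reaches the end of
	 * the page.
	 */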
	add	r2, r0, #PAGE_SIZE
1:	vstmia	r0!, {d0-d7}
	vstmia	r0!, {d0-d7}
	vstmia	r0!, {d0-d7}
	vstmia	r0!, {d0-d7}
	cmp	r0, r2
	blt	1b
#if 0
	ldr	lr, [sp], #8		/* fetch LR */
	b	_C_LABEL(vfp_kernel_release)	/* tailcall the vfp release */
#else
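	/* Restore d0-d7 and the saved FPEXC (possibly disabling the VFP). */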
	vpop	{d0-d7}
	mcr	p10, 7, r3, c8, c0, 0
	RET
#endif
END(bzero_page_vfp)

/*
 * This copies a page 64 bytes at a time.  64 was chosen over 32 since
 * 64 is the cache line size of the Cortex-A8.
 */
/* LINTSTUB: void bcopy_page_vfp(vaddr_t, vaddr_t); */
ENTRY(bcopy_page_vfp)
#ifdef _ARM_ARCH_DWORD_OK
	pld	[r0]			@ preload the first 128 bytes
	pld	[r0, #32]
	pld	[r0, #64]
	pld	[r0, #96]
#endif
#if 0
	str	lr, [sp, #-8]!
	bl	_C_LABEL(vfp_kernel_acquire)
#else
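	/* Inline VFP acquire, as in bzero_page_vfp above. */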
	mrc	p10, 7, r3, c8, c0, 0
	tst	r3, #VFP_FPEXC_EN
	orreq	r2, r3, #VFP_FPEXC_EN
	mcreq	p10, 7, r2, c8, c0, 0
	vpush	{d0-d7}
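	/*
	 * Stop 128 bytes short of the end of the source page so the final
	 * chunk is copied without preloading past it.
	 */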
	add	r2, r0, #PAGE_SIZE-128
#endif
1:
#ifdef _ARM_ARCH_DWORD_OK
	pld	[r0, #128]		@ preload the next 128
	pld	[r0, #160]
	pld	[r0, #192]
	pld	[r0, #224]
#endif
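	/*
	 * Copy 128 bytes per iteration as two 64-byte vldmia/vstmia pairs.
	 * blt goes back to 1: to preload the next chunk; beq copies the
	 * final 128 bytes without any further preloading.
	 */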
2:	vldmia	r0!, {d0-d7}		@ read   0-63
	vstmia	r1!, {d0-d7}		@ write  0-63
	vldmia	r0!, {d0-d7}		@ read  64-127
	vstmia	r1!, {d0-d7}		@ write 64-127
	cmp	r0, r2
	blt	1b
	beq	2b
#if 0
	ldr	lr, [sp], #8		/* fetch LR */
	b	_C_LABEL(vfp_kernel_release)	/* tailcall the vfp release */
#else
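	/* Restore d0-d7 and put back the original FPEXC. */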
	vpop	{d0-d7}
	mcr	p10, 7, r3, c8, c0, 0
	RET
#endif
END(bcopy_page_vfp)