/*	$NetBSD: memset_neon.S,v 1.1 2012/12/13 01:41:59 matt Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <machine/asm.h>
#include "assym.h"

/*
 * memset: Sets a block of memory to the specified value
 * using NEON instructions
 *
 * On entry:
 *   r0 - dest address
 *   r1 - byte to write
 *   r2 - number of bytes to write
 *
 * On exit:
 *   r0 - dest address
 */
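/*
 * Register usage inside the routine: ip walks the destination, r1 holds
 * the remaining byte count, and q0-q3 are all loaded with the replicated
 * fill byte so that vstmia can emit up to 64 bytes per instruction.
 * d7 is also reused as a scratch buffer whenever a partial dword has to
 * be merged with existing memory.
 */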
/* LINTSTUB: Func: void *memset(void *, int, size_t) */
ENTRY(memset)
	and		r3, r1, #0xff	/* We deal with bytes */
	mov		r1, r2
	mov		ip, r0		/* r0 needs to stay the same */

	vdup.8		q0, r3		/* move fill to SIMD */
	/* we no longer need to keep the fill value in an ARM register */
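	/*
	 * vdup.8 replicates the fill byte into all 16 lanes of q0, so each
	 * D register now holds 8 copies of it.
	 */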

	/* Ok first we will dword align the address */
	ands		r2, ip, #7	/* grab the bottom three bits */
	beq		.Lmemset_dwordaligned	/* The addr is dword aligned */
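	/*
	 * The destination is not dword aligned: load the dword that
	 * contains it, merge the fill value into the bytes at and above
	 * the start address, and write the whole dword back.
	 */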

	bic		ip, ip, #7	/* clear bottom three bits of addr */
	vldr		d7, [ip]	/* load from memory */
	add		r1, r1, r2	/* add "pre-fill" to length */
	lsl		r2, r2, #3	/* convert bytes to a no-fill bit count */

#ifdef __ARMEB__
	neg		r2, r2		/* start from the MSB */
#endif
	vmov		s4, r2		/* move shift count to SIMD (low half of d2) */
	vmvn.u64	d3, #0		/* set all ones */
	vshl.u64	d3, d3, d2	/* clear the no-fill bits: d3 is the fill mask */
	vmvn.u64	d2, d3		/* invert it to get the no-fill mask */
	vand		d7, d7, d2	/* keep memory in the no-fill bytes */
	vand		d2, d0, d3	/* take the fill value for the fill bytes */
	vorr		d7, d2, d7	/* merge fill and memory */

	cmp		r1, #8		/* do we have less than 8 bytes? */
	bge		1f
	mov		r2, #0		/* this will be the only store */
	vmov		d0, d7		/* use the merged dword as the fill source */
	vldr		d7, [ip]	/* and reload the untouched memory */
	b		.Lmemset_lessthaneight_noload

1:	vstmia		ip!, {d7}	/* write back to memory */
	subs		r1, r1, #8	/* and remove 8 bytes from the length */
	RETc(eq)

	/* We are now doubleword aligned */
.Lmemset_dwordaligned:
	vmov		q1, q0		/* put fill in q1 (d2-d3) */
	vmov		q2, q0		/* put fill in q2 (d4-d5) */
	vmov		q3, q0		/* put fill in q3 (d6-d7) */

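	/*
	 * q0-q3 now all hold the fill pattern, so a single vstmia of
	 * d0-d7 writes 64 bytes of fill.
	 */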
	ands		r2, ip, #63	/* check for 64-byte alignment */
	beq		.Lmemset_8dwordaligned
	/*
	 * Align to a 64-byte boundary so that stores don't cross cacheline
	 * boundaries.  If the remaining length is shorter than that, only
	 * the whole dwords that fit are stored here.
	 */
	rsb		r2, r2, #64	/* how many bytes until 64 bytes */
	cmp		r1, r2		/* compare against length */
	andlt		r2, r1, #0x38	/* if len < r2, use trunc(len, 8) */
	subs		r1, r1, r2	/* subtract from len */
	add		pc, pc, r2	/* and jump to it */
	nop
	RETc(eq);			b	.Lmemset_lessthaneight
	vstmia		ip!, {d0};	b	.Lmemset_8dwordaligned
	vstmia		ip!, {d0-d1};	b	.Lmemset_8dwordaligned
	vstmia		ip!, {d0-d2};	b	.Lmemset_8dwordaligned
	vstmia		ip!, {d0-d3};	b	.Lmemset_8dwordaligned
	vstmia		ip!, {d0-d4};	b	.Lmemset_8dwordaligned
	vstmia		ip!, {d0-d5};	b	.Lmemset_8dwordaligned
	vstmia		ip!, {d0-d6}
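	/*
	 * The "add pc, pc, r2" above reads the pc 8 bytes ahead of itself,
	 * so adding the byte count r2 lands on the table entry that stores
	 * exactly r2/8 dwords; the first entry stores nothing and either
	 * returns (length now zero) or goes to handle a sub-dword tail.
	 */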
.Lmemset_8dwordaligned:
	vmov		d0, d1		/* restore in case of unaligned start */
	cmp		r1, #8		/* do we have less than 8 bytes? */
	movlt		r2, #0		/* indicate this is the last dword */
	blt		.Lmemset_lessthaneight

	cmp		r1, #512
	blt		.Lmemset_sub512

	/* Do 512 bytes at a time */
	mov		r2, #512
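	/*
	 * r2 stays at 512 through the loop so the shared epilogue at
	 * .Lmemset_0 subtracts one full block per pass.
	 */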
.Lmemset_512:
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
	vstmia		ip!, {d0-d7}
.Lmemset_0:
	subs		r1, r1, r2
	RETc(eq)			/* return if done */
	cmp		r1, #512
	bge		.Lmemset_512

	/*
	 * We have less than 512 bytes left, but since the sequence above
	 * stores 64 bytes at a time, we determine the number of store
	 * instructions needed for the remainder (if it is >= 64 bytes) and
	 * jump back into the block to execute just that many vstmia.
	 */
.Lmemset_sub512:
	lsrs		r3, r1, #6	/* how many full 64-byte chunks? */
	lslne		r2, r3, #6	/* chunk count back to a byte count */
	lslne		r3, r3, #2	/* 4 bytes of code per chunk */
	addne		r3, r3, #1f + 8 - .Lmemset_0
					/* plus the distance from .Lmemset_0 to 1f */
1:	subne		pc, r3		/* and go */
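	/*
	 * The subtract from pc above jumps backwards into the block of
	 * eight vstmia instructions, entering it with just enough of them
	 * left to store the remaining whole 64-byte chunks before falling
	 * into .Lmemset_0 again.
	 */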

	/*
	 * We have less than 64 bytes to fill on an 8-dword aligned address.
	 */
	and		r2, r1, #56	/* get # of bytes in full dwords */
	ands		r1, r1, #7	/* get # of extra bytes */
	beq		.Lmemset_finalstore
	/*
	 * The last dword is only partially filled, so load it and merge
	 * the fill value into its leading bytes.
	 */
.Lmemset_lessthaneight:
	add		r3, ip, r2	/* address of the last partial dword */
	vldr		d7, [r3]	/* load it (vldr takes no register offset) */
.Lmemset_lessthaneight_noload:
	lsl		r1, r1, #3	/* convert bytes to a fill bit count */
#ifdef __ARMEB__
	neg		r1, r1		/* start from the MSB */
#endif
	vmov		s4, r1		/* move shift count to SIMD (low half of d2) */
	vmvn.u64	d3, #0		/* set all ones */
	vshl.u64	d3, d3, d2	/* clear the fill bits: d3 is the no-fill mask */
	vmvn.u64	d2, d3		/* invert it to get the fill mask */
	vand		d7, d7, d3	/* keep memory in the no-fill bytes */
	vand		d2, d0, d2	/* take the fill value for the fill bytes */
	vorr		d7, d2, d7	/* merge fill and no-fill */
	vmov		q1, q0		/* restore d2 & d3 */
	add		r2, r2, #8	/* count the partial dword as well */
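	/*
	 * r2 now counts the partial dword as well, so the table below
	 * stores r2/8 dwords ending with the merged value in d7.
	 */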
.Lmemset_finalstore:
	add		pc, pc, r2	/* and jump to it */
	nop
	RET;				nop	/* 0 dwords left */
	vstr		d7, [ip];	RET
	vstmia		ip, {d6-d7};	RET
	vstmia		ip, {d5-d7};	RET
	vstmia		ip, {d4-d7};	RET
	vstmia		ip, {d3-d7};	RET
	vstmia		ip, {d2-d7};	RET
	vstmia		ip, {d1-d7};	RET
	vstmia		ip, {d0-d7};	RET
END(memset)