/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

RCSID("$NetBSD: strlen_neon.S,v 1.4 2023/01/19 18:03:03 mlelstv Exp $")
	.text

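/*
 * size_t strlen(const char *s)
 *
 * Scan the string 16 bytes at a time using NEON.  The pointer is
 * rounded down to a 16-byte boundary so every load is qword aligned;
 * the bytes in front of the string are masked off so a stray NUL
 * there is ignored (r0 starts at -misalignment to keep the count
 * right).  Each block is arranged as a big-endian quadword so that
 * counting leading zeroes locates the first NUL byte.
 */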
ENTRY(strlen)
	mov	ip, r0		/* we use r0 for return value */
	ands	r1, r0, #15	/* verify qword alignment */
	neg	r0, r1		/* subtract misalignment from count */
	veor	q2, q2, q2	/* clear mask */
	mov	r3, #7		/* NBBY - 1 */
	vdup.32	q3, r3		/* dup throughout q3 */
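	/* adding 7 before the >>3 in the loop rounds clz results up to whole bytes */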
	movw	r3, #0x0404	/* magic since there are 4 bytes per U32 */
	orr	r3, r3, lsl #16	/* copy to upper 16 bits */
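	/* r3 = 0x04040404: the per-word byte counts when no word contains a NUL */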
	beq	.Lmain_loop	/* aligned (Z from the ands above)? no mask needed */
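	/*
	 * The string does not start on a 16-byte boundary.  Round the
	 * pointer down and build a mask whose leading bytes (those in
	 * front of the string) are all ones.  ORing it into the
	 * big-endian data below forces those bytes to be non-zero so
	 * they cannot look like the terminating NUL; r0 already holds
	 * -misalignment so they are not counted.
	 */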
	veor	q0, q0, q0	/* clear q0 */
	vmvn	q2, q2		/* set all 16 bytes of mask to all 1s */
	bic	ip, ip, #15	/* qword align string address */
	lsl	r1, r1, #3	/* convert to bits */
	cmp	r1, #64
	rsbhi	r1, r1, #128	/*   > 64? BE so we are shifting LSW right */
	movhi	r2, #0		/*   > 64? leave MSW alone */
	rsbls	r2, r1, #64	/*   <=64? BE so we are shifting MSW right */
	movls	r1, #64		/*   <=64? clear LSW */
	vmov	d0, r1, r2	/* set shifts for lower and upper halves */
	vmovl.u32 q0, d0	/* 2 U32 -> 2 U64 */
	vshl.u64 q2, q2, q0	/* shift the all-ones mask into place */
	/*
	 * Main loop.  Load 16 bytes, arrange them as a big-endian
	 * quadword, compare each byte against NUL, and use CLZ on the
	 * comparison results to locate the first NUL byte, if any.
	 */
.Lmain_loop:
	vld1.64 {d0, d1}, [ip:128]!	/* load qword */
#ifdef __ARMEL__
	vrev64.8 q0, q0		/* convert to BE for clz */
#endif
	vswp	d0, d1		/* swap dwords to get BE qword */
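	/*
	 * q0 (d1:d0) now holds the block as one big-endian quadword:
	 * the first byte of the block is its most significant byte.
	 */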
	vorr	q0, q0, q2	/* or "in" leading byte mask */
	veor	q2, q2, q2	/* clear leading byte mask (only the first block needs it) */
	vceq.i8	q1, q0, #0	/* test each byte for 0: NUL bytes become 0xff, others 0x00 */
	/* Why couldn't there be a 64-bit CLZ? */
	vclz.u32 q1, q1		/* count leading zeroes to find the 0 byte */
	vadd.u32 q1, q1, q3	/* round up to byte boundary */
	vshr.u32 q1, q1, #3	/* convert to bytes */
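	/* each 32-bit lane now holds 0..4: bytes before the first NUL in that word (4 = no NUL) */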
	vmovn.u32 d0, q1	/* 4 I32 -> 4 I16 */
	vmovn.u16 d0, q0	/* 4 I16 -> 4 I8 */
	vmov	r2, s0		/* get counts */
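	/* r2 holds the four counts, one byte per word, first word of the block in the MSB */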
	eors	r2, r2, r3	/* xor with 0x04040404 */
	addeq	r0, #16		/*   0?  no NULs */
	beq	.Lmain_loop	/* get next qword */
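	/*
	 * Some word contained a NUL.  The XOR turned every "no NUL"
	 * count (4) into 0, so the leading zero bytes of r2 are the
	 * NUL-free words; the first non-zero byte is count ^ 4
	 * (0b1cc), whose low two bits are the byte offset of the NUL
	 * within its word.
	 */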
	clz	ip, r2		/* count leading zeros */
	mov	r2, r2, lsl ip	/* discard them */
	mov	ip, ip, lsr #3	/* divide leading zeroes by 8 */
	add	r0, r0, ip, lsl #2 /* multiply by 4 and add to count */
	and	r2, r2, #(3 << 29)	/* extract the byte offset within the word */
	add	r0, r0, r2, lsr #29	/* and add it to the count */
	RET			/* and return. */
END(strlen)