/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

RCSID("$NetBSD: strlen_neon.S,v 1.4 2023/01/19 18:03:03 mlelstv Exp $")
	.text

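/*
 * size_t strlen(const char *s);
 *
 * NEON strlen: scan the string one qword-aligned 16-byte chunk at a
 * time.  Each chunk is byte-reversed into big-endian order so that
 * leading-zero counts walk its bytes in string order.  When the string
 * does not start on a 16-byte boundary, the first load comes from the
 * aligned address below it and the bytes preceding the string are
 * masked to 0xff so they cannot match as NULs; the running count in r0
 * starts at minus the misalignment to compensate.
 */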
ENTRY(strlen)
	mov	ip, r0			/* we use r0 for return value */
	ands	r1, r0, #15		/* verify qword alignment */
	neg	r0, r1			/* subtract misalignment from count */
	veor	q2, q2, q2		/* clear mask */
	mov	r3, #7			/* NBBY - 1 */
	vdup.32	q3, r3			/* dup throughout q3 */
	movw	r3, #0x0404		/* magic since there are 4 bytes per U32 */
	orr	r3, r3, r3, lsl #16	/* copy to upper 16 bits */
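	/*
	 * r3 = 0x04040404 is the "no NUL found" pattern: below, each
	 * byte of the packed result holds, for one 32-bit lane, the
	 * number of bytes preceding the first NUL in that lane (4 if
	 * the lane has none).  Roughly, per big-endian lane w of the
	 * VCEQ result (a C sketch; clz32 is an illustrative 32-bit
	 * count-leading-zeros helper, not from this file):
	 *
	 *	cnt = (clz32(w) + 7) >> 3;	// 0..4; 4 => no NUL
	 *
	 * so a chunk with no NUL packs to exactly 0x04040404.
	 */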
	beq	.Lmain_loop		/* Z still set by ANDS: already aligned */
	veor	q0, q0, q0		/* clear q0 */
	vmvn	q2, q2			/* set all 16 bytes of mask to all 1s */
	bic	ip, ip, #15		/* qword align string address */
	lsl	r1, r1, #3		/* convert to bits */
	cmp	r1, #64
	rsbhi	r1, r1, #128		/* > 64? BE so we are shifting LSW right */
	movhi	r2, #0			/* > 64? leave MSW alone */
	rsbls	r2, r1, #64		/* <= 64? BE so we are shifting MSW right */
	movls	r1, #64			/* <= 64? clear LSW */
	vmov	d0, r1, r2		/* set shifts for lower and upper halves */
	vmovl.u32 q0, d0		/* 2 U32 -> 2 U64 */
	vshl.u64 q2, q2, q0		/* shift */
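	/*
	 * There is no 128-bit shift, so the all-ones mask is shifted as
	 * two 64-bit halves with independent shift counts, leaving 0xff
	 * in exactly the bytes that precede the start of the string in
	 * the big-endian chunk loaded below.  The VORR in the main loop
	 * forces those bytes non-zero so they cannot match as NULs.
	 */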
	/*
	 * Main loop.  Load 16 bytes, convert them to one big-endian
	 * qword, test each byte for NUL, and count leading zeroes in
	 * each 32-bit lane to locate the first NUL byte.
	 */
.Lmain_loop:
	vld1.64 {d0, d1}, [ip:128]!	/* load qword */
#ifdef __ARMEL__
	vrev64.8 q0, q0		/* convert to BE for clz */
#endif
	vswp	d0, d1		/* swap dwords to get BE qword */
	vorr	q0, q0, q2	/* or "in" leading byte mask */
	veor	q2, q2, q2	/* clear leading byte mask */
	vceq.i8	q1, q0, #0	/* test each byte for 0 */
	/* Why couldn't there be a 64-bit CLZ? */
	vclz.u32 q1, q1		/* count leading zeroes to find the 0 byte */
	vadd.u32 q1, q1, q3	/* round up to byte boundary */
	vshr.u32 q1, q1, #3	/* convert to bytes */
	vmovn.u32 d0, q1	/* 4 I32 -> 4 I16 */
	vmovn.u16 d0, q0	/* 4 I16 -> 4 I8 */
	vmov	r2, s0		/* get counts */
	eors	r2, r2, r3	/* xor with 0x04040404 */
	addeq	r0, #16		/* 0? no NULs */
	beq	.Lmain_loop	/* get next qword */
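	/*
	 * A NUL was found.  r2 now holds (count ^ 4) in each byte, with
	 * the most significant byte corresponding to the first 32-bit
	 * lane of the chunk in string order.  Zero bytes at the top are
	 * lanes with no NUL (4 ^ 4 == 0); the first non-zero byte has
	 * the value 4 + count (binary 1cc), so after shifting it to the
	 * top, bits 30:29 recover the NUL's byte offset within its lane.
	 */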
	clz	ip, r2			/* count leading zeros */
	mov	r2, r2, lsl ip		/* discard them */
	mov	ip, ip, lsr #3		/* divide leading zeroes by 8 */
	add	r0, r0, ip, lsl #2	/* multiply by 4 and add to count */
	and	r2, r2, #(3 << 29)	/* extract byte offset within the lane */
	add	r0, r0, r2, lsr #29	/* ... and add it to the length */
	RET				/* and return. */
END(strlen)