/*	$NetBSD: aes_neon_32.S,v 1.1 2020/06/29 23:57:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

	.fpu	neon

	.section .rodata
	.p2align 4

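/*
 * Lookup tables for the vector-permutation (vpaes-style) AES code
 * below.  Briefly, and as read from how the code uses them:
 *
 *	inv, inva	nibble tables used to assemble the GF(2^8)
 *			inverse at the heart of SubBytes out of 4-bit
 *			table lookups
 *	mc_forward,	byte rotations realizing the forward and
 *	mc_backward	backward MixColumns combinations
 *	sr		ShiftRows permutations, indexed by round mod 4
 *	iptlo, ipthi	input transform applied to the low/high nibbles
 *			of the state before the first encryption round
 *	sb1, sb2, sbo	S-box output tables for middle and last rounds
 *	dipt*, dsb*	the corresponding decryption (inverse) tables
 */
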
	.type	inv,_ASM_TYPE_OBJECT
inv:
	.byte	0x80,0x01,0x08,0x0D,0x0F,0x06,0x05,0x0E
	.byte	0x02,0x0C,0x0B,0x0A,0x09,0x03,0x07,0x04
END(inv)

	.type	inva,_ASM_TYPE_OBJECT
inva:
	.byte	0x80,0x07,0x0B,0x0F,0x06,0x0A,0x04,0x01
	.byte	0x09,0x08,0x05,0x02,0x0C,0x0E,0x0D,0x03
END(inva)

	.type	mc_forward,_ASM_TYPE_OBJECT
mc_forward:
	.byte	0x01,0x02,0x03,0x00,0x05,0x06,0x07,0x04	/* 0 */
	.byte	0x09,0x0A,0x0B,0x08,0x0D,0x0E,0x0F,0x0C

	.byte	0x05,0x06,0x07,0x04,0x09,0x0A,0x0B,0x08	/* 1 */
	.byte	0x0D,0x0E,0x0F,0x0C,0x01,0x02,0x03,0x00

	.byte	0x09,0x0A,0x0B,0x08,0x0D,0x0E,0x0F,0x0C	/* 2 */
	.byte	0x01,0x02,0x03,0x00,0x05,0x06,0x07,0x04

.Lmc_forward_3:
	.byte	0x0D,0x0E,0x0F,0x0C,0x01,0x02,0x03,0x00	/* 3 */
	.byte	0x05,0x06,0x07,0x04,0x09,0x0A,0x0B,0x08
END(mc_forward)

	.type	mc_backward,_ASM_TYPE_OBJECT
mc_backward:
	.byte	0x03,0x00,0x01,0x02,0x07,0x04,0x05,0x06	/* 0 */
	.byte	0x0B,0x08,0x09,0x0A,0x0F,0x0C,0x0D,0x0E

	.byte	0x0F,0x0C,0x0D,0x0E,0x03,0x00,0x01,0x02	/* 1 */
	.byte	0x07,0x04,0x05,0x06,0x0B,0x08,0x09,0x0A

	.byte	0x0B,0x08,0x09,0x0A,0x0F,0x0C,0x0D,0x0E	/* 2 */
	.byte	0x03,0x00,0x01,0x02,0x07,0x04,0x05,0x06

	.byte	0x07,0x04,0x05,0x06,0x0B,0x08,0x09,0x0A	/* 3 */
	.byte	0x0F,0x0C,0x0D,0x0E,0x03,0x00,0x01,0x02
END(mc_backward)

	.type	sr,_ASM_TYPE_OBJECT
sr:
	.byte	0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07	/* 0 */
	.byte	0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F

	.byte	0x00,0x05,0x0A,0x0F,0x04,0x09,0x0E,0x03	/* 1 */
	.byte	0x08,0x0D,0x02,0x07,0x0C,0x01,0x06,0x0B

	.byte	0x00,0x09,0x02,0x0B,0x04,0x0D,0x06,0x0F	/* 2 */
	.byte	0x08,0x01,0x0A,0x03,0x0C,0x05,0x0E,0x07

	.byte	0x00,0x0D,0x0A,0x07,0x04,0x01,0x0E,0x0B	/* 3 */
	.byte	0x08,0x05,0x02,0x0F,0x0C,0x09,0x06,0x03
END(sr)

	.type	iptlo,_ASM_TYPE_OBJECT
iptlo:
	.byte	0x00,0x70,0x2A,0x5A,0x98,0xE8,0xB2,0xC2
	.byte	0x08,0x78,0x22,0x52,0x90,0xE0,0xBA,0xCA
END(iptlo)

	.type	ipthi,_ASM_TYPE_OBJECT
ipthi:
	.byte	0x00,0x4D,0x7C,0x31,0x7D,0x30,0x01,0x4C
	.byte	0x81,0xCC,0xFD,0xB0,0xFC,0xB1,0x80,0xCD
END(ipthi)

	.type	sb1_0,_ASM_TYPE_OBJECT
sb1_0:
	.byte	0x00,0x3E,0x50,0xCB,0x8F,0xE1,0x9B,0xB1
	.byte	0x44,0xF5,0x2A,0x14,0x6E,0x7A,0xDF,0xA5
END(sb1_0)

	.type	sb1_1,_ASM_TYPE_OBJECT
sb1_1:
	.byte	0x00,0x23,0xE2,0xFA,0x15,0xD4,0x18,0x36
	.byte	0xEF,0xD9,0x2E,0x0D,0xC1,0xCC,0xF7,0x3B
END(sb1_1)

	.type	sb2_0,_ASM_TYPE_OBJECT
sb2_0:
	.byte	0x00,0x24,0x71,0x0B,0xC6,0x93,0x7A,0xE2
	.byte	0xCD,0x2F,0x98,0xBC,0x55,0xE9,0xB7,0x5E
END(sb2_0)

	.type	sb2_1,_ASM_TYPE_OBJECT
sb2_1:
	.byte	0x00,0x29,0xE1,0x0A,0x40,0x88,0xEB,0x69
	.byte	0x4A,0x23,0x82,0xAB,0xC8,0x63,0xA1,0xC2
END(sb2_1)

	.type	sbo_0,_ASM_TYPE_OBJECT
sbo_0:
	.byte	0x00,0xC7,0xBD,0x6F,0x17,0x6D,0xD2,0xD0
	.byte	0x78,0xA8,0x02,0xC5,0x7A,0xBF,0xAA,0x15
END(sbo_0)

	.type	sbo_1,_ASM_TYPE_OBJECT
sbo_1:
	.byte	0x00,0x6A,0xBB,0x5F,0xA5,0x74,0xE4,0xCF
	.byte	0xFA,0x35,0x2B,0x41,0xD1,0x90,0x1E,0x8E
END(sbo_1)

	.type	diptlo,_ASM_TYPE_OBJECT
diptlo:
	.byte	0x00,0x5F,0x54,0x0B,0x04,0x5B,0x50,0x0F
	.byte	0x1A,0x45,0x4E,0x11,0x1E,0x41,0x4A,0x15
END(diptlo)

	.type	dipthi,_ASM_TYPE_OBJECT
dipthi:
	.byte	0x00,0x65,0x05,0x60,0xE6,0x83,0xE3,0x86
	.byte	0x94,0xF1,0x91,0xF4,0x72,0x17,0x77,0x12
END(dipthi)

	.type	dsb9_0,_ASM_TYPE_OBJECT
dsb9_0:
	.byte	0x00,0xD6,0x86,0x9A,0x53,0x03,0x1C,0x85
	.byte	0xC9,0x4C,0x99,0x4F,0x50,0x1F,0xD5,0xCA
END(dsb9_0)

	.type	dsb9_1,_ASM_TYPE_OBJECT
dsb9_1:
	.byte	0x00,0x49,0xD7,0xEC,0x89,0x17,0x3B,0xC0
	.byte	0x65,0xA5,0xFB,0xB2,0x9E,0x2C,0x5E,0x72
END(dsb9_1)

	.type	dsbd_0,_ASM_TYPE_OBJECT
dsbd_0:
	.byte	0x00,0xA2,0xB1,0xE6,0xDF,0xCC,0x57,0x7D
	.byte	0x39,0x44,0x2A,0x88,0x13,0x9B,0x6E,0xF5
END(dsbd_0)

	.type	dsbd_1,_ASM_TYPE_OBJECT
dsbd_1:
	.byte	0x00,0xCB,0xC6,0x24,0xF7,0xFA,0xE2,0x3C
	.byte	0xD3,0xEF,0xDE,0x15,0x0D,0x18,0x31,0x29
END(dsbd_1)

	.type	dsbb_0,_ASM_TYPE_OBJECT
dsbb_0:
	.byte	0x00,0x42,0xB4,0x96,0x92,0x64,0x22,0xD0
	.byte	0x04,0xD4,0xF2,0xB0,0xF6,0x46,0x26,0x60
END(dsbb_0)

	.type	dsbb_1,_ASM_TYPE_OBJECT
dsbb_1:
	.byte	0x00,0x67,0x59,0xCD,0xA6,0x98,0x94,0xC1
	.byte	0x6B,0xAA,0x55,0x32,0x3E,0x0C,0xFF,0xF3
END(dsbb_1)

	.type	dsbe_0,_ASM_TYPE_OBJECT
dsbe_0:
	.byte	0x00,0xD0,0xD4,0x26,0x96,0x92,0xF2,0x46
	.byte	0xB0,0xF6,0xB4,0x64,0x04,0x60,0x42,0x22
END(dsbe_0)

	.type	dsbe_1,_ASM_TYPE_OBJECT
dsbe_1:
	.byte	0x00,0xC1,0xAA,0xFF,0xCD,0xA6,0x55,0x0C
	.byte	0x32,0x3E,0x59,0x98,0x6B,0xF3,0x67,0x94
END(dsbe_1)

	.type	dsbo_0,_ASM_TYPE_OBJECT
dsbo_0:
	.byte	0x00,0x40,0xF9,0x7E,0x53,0xEA,0x87,0x13
	.byte	0x2D,0x3E,0x94,0xD4,0xB9,0x6D,0xAA,0xC7
END(dsbo_0)

	.type	dsbo_1,_ASM_TYPE_OBJECT
dsbo_1:
	.byte	0x00,0x1D,0x44,0x93,0x0F,0x56,0xD7,0x12
	.byte	0x9C,0x8E,0xC5,0xD8,0x59,0x81,0x4B,0xCA
END(dsbo_1)

/*
 * aes_neon_enc1(enc, x, nrounds)
 *
 *	With -mfloat-abi=hard:
 *
 * uint8x16_t@q0
 * aes_neon_enc1(const struct aesenc *enc@r0, uint8x16_t x@q0,
 *     unsigned nrounds@r1)
 *
 *	With -mfloat-abi=soft(fp) (here spelled `#ifdef _KERNEL'):
 *
 * uint8x16_t@(r0,r1,r2,r3)
 * aes_neon_enc1(const struct aesenc *enc@r0,
 *     uint8x16_t x@(r2,r3,sp[0],sp[4]), nrounds@sp[8])
 */
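/*
 * For illustration only, a minimal C-level caller under the hard-float
 * ABI might look like the sketch below.  The header name and the
 * key-schedule setup are assumptions, not part of this file; the
 * prototype itself just restates the convention documented above.
 *
 *	#include <arm_neon.h>
 *	#include "aes_neon.h"	// assumed to declare aes_neon_enc1
 *
 *	uint8x16_t
 *	encrypt_block(const struct aesenc *enc, const uint8_t in[16],
 *	    unsigned nrounds)	// 10, 12, or 14 for AES-128/192/256
 *	{
 *		uint8x16_t x = vld1q_u8(in);	// load one 16-byte block
 *		return aes_neon_enc1(enc, x, nrounds);
 *	}
 */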
ENTRY(aes_neon_enc1)
#ifdef _KERNEL
	vmov	d0, r2, r3		/* d0 := x lo */
	vldr	d1, [sp]		/* d1 := x hi */
	ldr	r1, [sp, #8]		/* r1 := nrounds */
#endif
	push	{r4, r5, r6, r7, r8, r10, r11, lr}
	vpush	{d8-d15}

	/*
	 * r3: rmod4
	 * r4: mc_forward
	 * r5: mc_backward
	 * r6,r7,r8,r10,r11: temporaries
	 * q0={d0-d1}: x/ak/A
	 * q1={d2-d3}: 0x0f0f...
	 * q2={d4-d5}: lo/k/j/io
	 * q3={d6-d7}: hi/i/jo
	 * q4={d8-d9}: iptlo
	 * q5={d10-d11}: ipthi
	 * q6={d12-d13}: sb1[0]/sbo[0]
	 * q7={d14-d15}: sb1[1]/sbo[1]
	 * q8={d16-d17}: sb2[0]
	 * q9={d18-d19}: sb2[1]
	 * q10={d20-d21}: inv
	 * q11={d22-d23}: inva
	 * q12={d24-d25}: ir/iak/iakr/sb1_0(io)/mc_backward[rmod4]
	 * q13={d26-d27}: jr/jak/jakr/sb1_1(jo)/mc_forward[rmod4]
	 * q14={d28-d29}: rk/A2/A2_B_D
	 * q15={d30-d31}: A2_B/sr[rmod4]
	 */

	vld1.64	{d28-d29}, [r0 :128]!	/* q14 = *rk++ */
	movw	r3, #0
	vmov.i8	q1, #0x0f

	/* (q4, q5) := (iptlo, ipthi) */
	ldr	r6, =iptlo
	ldr	r7, =ipthi
	vld1.64	{d8-d9}, [r6 :128]
	vld1.64	{d10-d11}, [r7 :128]

	/* load the rest of the constants */
	ldr	r4, =sb1_0
	ldr	r5, =sb1_1
	ldr	r6, =sb2_0
	ldr	r7, =sb2_1
	ldr	r8, =inv
	ldr	r10, =inva
	vld1.64	{d12-d13}, [r4 :128]	/* q6 = sb1[0] */
	vld1.64	{d14-d15}, [r5 :128]	/* q7 = sb1[1] */
	vld1.64	{d16-d17}, [r6 :128]	/* q8 = sb2[0] */
	vld1.64	{d18-d19}, [r7 :128]	/* q9 = sb2[1] */
	vld1.64	{d20-d21}, [r8 :128]	/* q10 = inv */
	vld1.64	{d22-d23}, [r10 :128]	/* q11 = inva */

	/* (r4, r5) := (&mc_forward[0], &mc_backward[0]) */
	ldr	r4, =mc_forward
	ldr	r5, =mc_backward

	/* (q2, q3) := (lo, hi) */
	vshr.u8	q3, q0, #4
	vand	q2, q0, q1		/* q2 := x & 0x0f0f... */
	vand	q3, q3, q1		/* q3 := (x >> 4) & 0x0f0f... */

	/* (q2, q3) := (iptlo(lo), ipthi(hi)) */
	vtbl.8	d4, {d8-d9}, d4
	vtbl.8	d5, {d8-d9}, d5
	vtbl.8	d6, {d10-d11}, d6
	vtbl.8	d7, {d10-d11}, d7

	/* q0 := rk[0] + iptlo(lo) + ipthi(hi) */
	veor	q0, q14, q2
	veor	q0, q0, q3

	b	2f

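	/*
	 * Main round loop.  Each iteration takes the nibble-decomposed
	 * SubBytes outputs io/jo from the code at 2: below, pushes them
	 * through the sb1/sb2 output tables, and then combines the
	 * results with the mc_forward/mc_backward byte rotations to
	 * accumulate MixColumns, in the style of vector-permutation
	 * AES (vpaes).
	 */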
1:	vld1.64	{d28-d29}, [r0 :128]!	/* q14 = *rk++ */

	/* q0 := A = rk[i] + sb1_0(io) + sb1_1(jo) */
	vtbl.8	d24, {d12-d13}, d4
	vtbl.8	d25, {d12-d13}, d5
	vtbl.8	d26, {d14-d15}, d6
	vtbl.8	d27, {d14-d15}, d7
	veor	q0, q14, q12
	veor	q0, q0, q13

	/* q14 := A2 = sb2_0[io] + sb2_1[jo] */
	vtbl.8	d24, {d16-d17}, d4
	vtbl.8	d25, {d16-d17}, d5
	vtbl.8	d26, {d18-d19}, d6
	vtbl.8	d27, {d18-d19}, d7
	veor	q14, q12, q13

	/* (q12, q13) := (mc_forward[rmod4], mc_backward[rmod4]) */
	add	r6, r4, r3, lsl #4
	add	r7, r5, r3, lsl #4
	vld1.64	{d24-d25}, [r6]
	vld1.64	{d26-d27}, [r7]

	/* q15 := A2_B = A2 + A(mcf) */
	vtbl.8	d30, {d0-d1}, d24
	vtbl.8	d31, {d0-d1}, d25
	veor	q15, q15, q14

	/* q14 := A2_B_D = A2_B + A(mcb) */
	vtbl.8	d28, {d0-d1}, d26
	vtbl.8	d29, {d0-d1}, d27
	veor	q14, q14, q15

	/* q0 := x = A2_B_D + A2_B(mcf) */
	vtbl.8	d0, {d30-d31}, d24
	vtbl.8	d1, {d30-d31}, d25
	veor	q0, q0, q14

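	/*
	 * Roughly, SubBytes is computed here without a 256-byte table:
	 * the state is split into its low nibble k and high nibble i,
	 * the inv/inva tables supply the pieces of a GF(2^8) inversion
	 * built from 4-bit lookups, and the affine output transform is
	 * folded into the sb1/sb2 (or sbo/dsb*) tables applied to the
	 * io/jo values afterward.
	 */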
2:	/*
	 * SubBytes
	 */

	/* (q2, q3) := (k, i) */
	vshr.u8	q3, q0, #4
	vand	q2, q0, q1		/* q2 := x & 0x0f0f... */
	vand	q3, q3, q1		/* q3 := (x >> 4) & 0x0f0f... */

	/* q0 := a/k */
	vtbl.8	d0, {d22-d23}, d4
	vtbl.8	d1, {d22-d23}, d5

	/* q2 := j = i + k */
	veor	q2, q3, q2

	/* q12 := ir = 1/i */
	vtbl.8	d24, {d20-d21}, d6
	vtbl.8	d25, {d20-d21}, d7

	/* q13 := jr = 1/j */
	vtbl.8	d26, {d20-d21}, d4
	vtbl.8	d27, {d20-d21}, d5

	/* q12 := iak = 1/i + a/k */
	veor	q12, q12, q0

	/* q13 := jak = 1/j + a/k */
	veor	q13, q13, q0

	/* q12 := iakr = 1/(1/i + a/k) */
	vtbl.8	d24, {d20-d21}, d24
	vtbl.8	d25, {d20-d21}, d25

	/* q13 := jakr = 1/(1/j + a/k) */
	vtbl.8	d26, {d20-d21}, d26
	vtbl.8	d27, {d20-d21}, d27

	/* q2 := io = j + 1/(1/i + a/k) */
	veor	q2, q2, q12

	/* q3 := jo = i + 1/(1/j + a/k) */
	veor	q3, q3, q13

	/* advance round */
	add	r3, r3, #1
	subs	r1, r1, #1
	and	r3, r3, #3
	bne	1b

	/* (q6, q7, q15) := (sbo[0], sbo[1], sr[rmod4]) */
	ldr	r8, =sr
	ldr	r6, =sbo_0
	ldr	r7, =sbo_1
	add	r8, r8, r3, lsl #4
	vld1.64	{d12-d13}, [r6 :128]
	vld1.64	{d14-d15}, [r7 :128]
	vld1.64	{d30-d31}, [r8 :128]

	vld1.64	{d28-d29}, [r0 :128]!	/* q14 = *rk++ */

	/* (q2, q3) := (sbo_0(io), sbo_1(jo)) */
	vtbl.8	d4, {d12-d13}, d4
	vtbl.8	d5, {d12-d13}, d5
	vtbl.8	d6, {d14-d15}, d6
	vtbl.8	d7, {d14-d15}, d7

	/* q2 := x = rk[nr] + sbo_0(io) + sbo_1(jo) */
	veor	q2, q2, q14
	veor	q2, q2, q3

	/* q0 := x(sr[rmod4]) */
	vtbl.8	d0, {d4-d5}, d30
	vtbl.8	d1, {d4-d5}, d31

	vpop	{d8-d15}
	pop	{r4, r5, r6, r7, r8, r10, r11, lr}
#ifdef _KERNEL
	vmov	r0, r1, d0
	vmov	r2, r3, d1
#endif
	bx	lr
END(aes_neon_enc1)

    428 /*
    429  * aes_neon_dec1(dec, x, nrounds)
    430  *
    431  *	With -mfloat-abi=hard:
    432  *
    433  * uint8x16_t@q0
    434  * aes_neon_dec1(const struct aesdec *dec@r0, uint8x16_t x@q0,
    435  *     unsigned nrounds@r1)
    436  *
    437  *	With -mfloat-abi=soft(fp) (here spelled `#ifdef _KERNEL'):
    438  *
    439  * uint8x16_t@(r0,r1,r2,r3)
    440  * aes_neon_dec1(const struct aesdec *dec@r0,
    441  *     uint8x16_t x@(r2,r3,sp[0],sp[4]), nrounds@sp[8])
    442  */
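/*
 * As with aes_neon_enc1 above, a hard-float caller sees an ordinary C
 * function.  A minimal round-trip sketch, assuming the header name and
 * that the enc/dec key schedules were already expanded elsewhere (both
 * assumptions, not part of this file):
 *
 *	#include <assert.h>
 *	#include <string.h>
 *	#include <arm_neon.h>
 *	#include "aes_neon.h"	// assumed declarations
 *
 *	void
 *	roundtrip(const struct aesenc *enc, const struct aesdec *dec,
 *	    const uint8_t in[16], unsigned nrounds)
 *	{
 *		uint8_t buf[16];
 *
 *		// decrypting an encryption should recover the input
 *		uint8x16_t c = aes_neon_enc1(enc, vld1q_u8(in), nrounds);
 *		uint8x16_t p = aes_neon_dec1(dec, c, nrounds);
 *		vst1q_u8(buf, p);
 *		assert(memcmp(buf, in, 16) == 0);
 *	}
 */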
ENTRY(aes_neon_dec1)
#ifdef _KERNEL
	vmov	d0, r2, r3		/* d0 := x lo */
	vldr	d1, [sp]		/* d1 := x hi */
	ldr	r1, [sp, #8]		/* r1 := nrounds */
#endif
	push	{r4, r5, r6, r7, r8, r10, r11, lr}
	vpush	{d8-d15}

	/*
	 * r3: 3 & ~(nrounds - 1)
	 * q0={d0-d1}: x/ak
	 * q1={d2-d3}: 0x0f0f...
	 * q2={d4-d5}: lo/k/j/io
	 * q3={d6-d7}: hi/i/jo
	 * q4={d8-d9}: diptlo/dsb9[0]
	 * q5={d10-d11}: dipthi/dsb9[1]
	 * q6={d12-d13}: dsbb[0]/dsbo[0]
	 * q7={d14-d15}: dsbb[1]/dsbo[1]
	 * q8={d16-d17}: dsbd[0]/dsbe[0]
	 * q9={d18-d19}: dsbd[1]/dsbe[1]
	 * q10={d20-d21}: inv
	 * q11={d22-d23}: inva
	 * q12={d24-d25}: ir/iak/iakr/dsbX_0(io)
	 * q13={d26-d27}: jr/jak/jakr/dsbX_1(jo)
	 * q14={d28-d29}: rk/xmc
	 * q15={d30-d31}: mc/sr[3 & ~(nrounds - 1)]
	 */

	vld1.64	{d28-d29}, [r0 :128]!	/* q14 = *rk++ */
	rsb	r3, r1, #0		/* r3 := -nrounds = ~(nrounds - 1) */
	vmov.i8	q1, #0x0f
	and	r3, r3, #3		/* r3 := 3 & ~(nrounds - 1) */
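	/*
	 * For example, nrounds = 10 or 14 gives r3 = 2, and nrounds = 12
	 * gives r3 = 0; this selects which sr[] row is used for the
	 * final output permutation at the end of the routine.
	 */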

	/* (q4, q5) := (diptlo, dipthi) */
	ldr	r6, =diptlo
	ldr	r7, =dipthi
	vld1.64	{d8-d9}, [r6 :128]
	vld1.64	{d10-d11}, [r7 :128]

	/* load the rest of the constants */
	ldr	r4, =dsbb_0
	ldr	r5, =dsbb_1
	ldr	r6, =inv
	ldr	r7, =inva
	ldr	r8, =.Lmc_forward_3
	vld1.64	{d12-d13}, [r4 :128]	/* q6 := dsbb[0] */
	vld1.64	{d14-d15}, [r5 :128]	/* q7 := dsbb[1] */
	vld1.64	{d20-d21}, [r6 :128]	/* q10 := inv */
	vld1.64	{d22-d23}, [r7 :128]	/* q11 := inva */
	vld1.64	{d30-d31}, [r8 :128]	/* q15 := mc_forward[3] */

	/* (q2, q3) := (lo, hi) */
	vshr.u8	q3, q0, #4
	vand	q2, q0, q1		/* q2 := x & 0x0f0f... */
	vand	q3, q3, q1		/* q3 := (x >> 4) & 0x0f0f... */

	/* (q2, q3) := (diptlo(lo), dipthi(hi)) */
	vtbl.8	d4, {d8-d9}, d4
	vtbl.8	d5, {d8-d9}, d5
	vtbl.8	d6, {d10-d11}, d6
	vtbl.8	d7, {d10-d11}, d7

	/* load dsb9 */
	ldr	r4, =dsb9_0
	ldr	r5, =dsb9_1
	vld1.64	{d8-d9}, [r4 :128]	/* q4 := dsb9[0] */
	vld1.64	{d10-d11}, [r5 :128]	/* q5 := dsb9[1] */

	/* q0 := rk[0] + diptlo(lo) + dipthi(hi) */
	veor	q0, q14, q2
	veor	q0, q0, q3

	b	2f

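	/*
	 * Main decryption round loop.  Each iteration runs the SubBytes
	 * outputs io/jo through the dsb9/dsbd/dsbb/dsbe tables
	 * (apparently named after the InvMixColumns multipliers 0x9,
	 * 0xD, 0xB, 0xE in the vpaes scheme), accumulating the results
	 * via the rotating mc permutation kept in q15.
	 */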
1:	/* load dsbd */
	ldr	r4, =dsbd_0
	vld1.64	{d16-d17}, [r4 :128]!	/* q8 := dsbd[0] */
	vld1.64	{d18-d19}, [r4 :128]	/* q9 := dsbd[1] */

	vld1.64	{d28-d29}, [r0 :128]!	/* q14 = *rk++ */

	/* q0 := rk[i] + dsb9_0(io) + dsb9_1(jo) */
	vtbl.8	d24, {d8-d9}, d4
	vtbl.8	d25, {d8-d9}, d5
	vtbl.8	d26, {d10-d11}, d6
	vtbl.8	d27, {d10-d11}, d7
	veor	q0, q14, q12
	veor	q0, q0, q13

	/* q14 := x(mc) */
	vtbl.8	d28, {d0-d1}, d30
	vtbl.8	d29, {d0-d1}, d31

	/* q0 := x(mc) + dsbd_0(io) + dsbd_1(jo) */
	vtbl.8	d24, {d16-d17}, d4
	vtbl.8	d25, {d16-d17}, d5
	vtbl.8	d26, {d18-d19}, d6
	vtbl.8	d27, {d18-d19}, d7
	veor	q0, q14, q12
	veor	q0, q0, q13

	/* load dsbe */
	ldr	r4, =dsbe_0
	vld1.64	{d16-d17}, [r4 :128]!	/* q8 := dsbe[0] */
	vld1.64	{d18-d19}, [r4 :128]	/* q9 := dsbe[1] */

	/* q0 := x(mc) + dsbb_0(io) + dsbb_1(jo) */
	vtbl.8	d28, {d0-d1}, d30
	vtbl.8	d29, {d0-d1}, d31
	vtbl.8	d24, {d12-d13}, d4
	vtbl.8	d25, {d12-d13}, d5
	vtbl.8	d26, {d14-d15}, d6
	vtbl.8	d27, {d14-d15}, d7
	veor	q0, q14, q12
	veor	q0, q0, q13

	/* q0 := x(mc) + dsbe_0(io) + dsbe_1(jo) */
	vtbl.8	d28, {d0-d1}, d30
	vtbl.8	d29, {d0-d1}, d31
	vtbl.8	d24, {d16-d17}, d4
	vtbl.8	d25, {d16-d17}, d5
	vtbl.8	d26, {d18-d19}, d6
	vtbl.8	d27, {d18-d19}, d7
	veor	q0, q14, q12
	veor	q0, q0, q13

	/* q15 := mc := mc <<< 12*8 */
	vext.8	q15, q15, q15, #12

2:	/*
	 * SubBytes
	 */

	/* (q2, q3) := (k, i) */
	vshr.u8	q3, q0, #4
	vand	q2, q0, q1		/* q2 := x & 0x0f0f... */
	vand	q3, q3, q1		/* q3 := (x >> 4) & 0x0f0f... */

	/* q0 := a/k */
	vtbl.8	d0, {d22-d23}, d4
	vtbl.8	d1, {d22-d23}, d5

	/* q2 := j = i + k */
	veor	q2, q3, q2

	/* q12 := ir = 1/i */
	vtbl.8	d24, {d20-d21}, d6
	vtbl.8	d25, {d20-d21}, d7

	/* q13 := jr = 1/j */
	vtbl.8	d26, {d20-d21}, d4
	vtbl.8	d27, {d20-d21}, d5

	/* q12 := iak = 1/i + a/k */
	veor	q12, q12, q0

	/* q13 := jak = 1/j + a/k */
	veor	q13, q13, q0

	/* q12 := iakr = 1/(1/i + a/k) */
	vtbl.8	d24, {d20-d21}, d24
	vtbl.8	d25, {d20-d21}, d25

	/* q13 := jakr = 1/(1/j + a/k) */
	vtbl.8	d26, {d20-d21}, d26
	vtbl.8	d27, {d20-d21}, d27

	/* q2 := io = j + 1/(1/i + a/k) */
	veor	q2, q2, q12

	/* q3 := jo = i + 1/(1/j + a/k) */
	veor	q3, q3, q13

	/* advance round */
	subs	r1, r1, #1
	bne	1b

	/* (q6, q7, q15) := (dsbo[0], dsbo[1], sr[i]) */
	ldr	r8, =sr
	ldr	r6, =dsbo_0
	ldr	r7, =dsbo_1
	add	r8, r8, r3, lsl #4
	vld1.64	{d12-d13}, [r6 :128]
	vld1.64	{d14-d15}, [r7 :128]
	vld1.64	{d30-d31}, [r8 :128]

	vld1.64	{d28-d29}, [r0 :128]!	/* q14 = *rk++ */

	/* (q2, q3) := (dsbo_0(io), dsbo_1(jo)) */
	vtbl.8	d4, {d12-d13}, d4
	vtbl.8	d5, {d12-d13}, d5
	vtbl.8	d6, {d14-d15}, d6
	vtbl.8	d7, {d14-d15}, d7

	/* q2 := x = rk[nr] + dsbo_0(io) + dsbo_1(jo) */
	veor	q2, q2, q14
	veor	q2, q2, q3

	/* q0 := x(sr[i]) */
	vtbl.8	d0, {d4-d5}, d30
	vtbl.8	d1, {d4-d5}, d31

	vpop	{d8-d15}
	pop	{r4, r5, r6, r7, r8, r10, r11, lr}
#ifdef _KERNEL
	vmov	r0, r1, d0
	vmov	r2, r3, d1
#endif
	bx	lr
END(aes_neon_dec1)