/*	$NetBSD: aes_neon_subr.c,v 1.4 2020/07/28 20:11:09 riastradh Exp $	*/

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aes_neon_subr.c,v 1.4 2020/07/28 20:11:09 riastradh Exp $");

#include <sys/endian.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <lib/libkern/libkern.h>
#else
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#define	KASSERT		assert
#endif

#include <crypto/aes/arch/arm/aes_neon.h>

#include "aes_neon_impl.h"

static inline uint8x16_t
loadblock(const void *in)
{
	return vld1q_u8(in);
}

static inline void
storeblock(void *out, uint8x16_t block)
{
	vst1q_u8(out, block);
}

void
aes_neon_enc(const struct aesenc *enc, const uint8_t in[static 16],
    uint8_t out[static 16], uint32_t nrounds)
{
	uint8x16_t block;

	block = loadblock(in);
	block = aes_neon_enc1(enc, block, nrounds);
	storeblock(out, block);
}

void
aes_neon_dec(const struct aesdec *dec, const uint8_t in[static 16],
    uint8_t out[static 16], uint32_t nrounds)
{
	uint8x16_t block;

	block = loadblock(in);
	block = aes_neon_dec1(dec, block, nrounds);
	storeblock(out, block);
}

void
aes_neon_cbc_enc(const struct aesenc *enc, const uint8_t in[static 16],
    uint8_t out[static 16], size_t nbytes, uint8_t iv[static 16],
    uint32_t nrounds)
{
	uint8x16_t cv;

	KASSERT(nbytes);

	cv = loadblock(iv);
	for (; nbytes; nbytes -= 16, in += 16, out += 16) {
		cv ^= loadblock(in);
		cv = aes_neon_enc1(enc, cv, nrounds);
		storeblock(out, cv);
	}
	storeblock(iv, cv);
}
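
/*
 * Note on aes_neon_cbc_enc above: CBC encryption is inherently serial,
 * since each block must be XORed with the previous ciphertext block
 * before it can be encrypted, so only the one-block aes_neon_enc1 path
 * is usable here; the two-block paths below apply only where the block
 * computations are independent of one another.
 */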

void
aes_neon_cbc_dec(const struct aesdec *dec, const uint8_t in[static 16],
    uint8_t out[static 16], size_t nbytes, uint8_t iv[static 16],
    uint32_t nrounds)
{
	uint8x16_t iv0, cv, b;

	KASSERT(nbytes);
	KASSERT(nbytes % 16 == 0);

	iv0 = loadblock(iv);
	cv = loadblock(in + nbytes - 16);
	storeblock(iv, cv);

	if (nbytes % 32) {
		KASSERT(nbytes % 32 == 16);
		b = aes_neon_dec1(dec, cv, nrounds);
		if ((nbytes -= 16) == 0)
			goto out;
		cv = loadblock(in + nbytes - 16);
		storeblock(out + nbytes, cv ^ b);
	}

	for (;;) {
		uint8x16x2_t b2;

		KASSERT(nbytes >= 32);

		b2.val[1] = cv;
		b2.val[0] = cv = loadblock(in + nbytes - 32);
		b2 = aes_neon_dec2(dec, b2, nrounds);
		storeblock(out + nbytes - 16, cv ^ b2.val[1]);
		if ((nbytes -= 32) == 0) {
			b = b2.val[0];
			goto out;
		}
		cv = loadblock(in + nbytes - 16);
		storeblock(out + nbytes, cv ^ b2.val[0]);
	}

out:	storeblock(out, b ^ iv0);
}
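
/*
 * Note on aes_neon_cbc_dec above: unlike encryption, decryption of
 * distinct blocks is independent, so pairs of ciphertext blocks are fed
 * through aes_neon_dec2 and each result is XORed with the preceding
 * ciphertext block (or, for the first block, with the caller's IV,
 * saved in iv0).  The buffer is walked from the end toward the
 * beginning, which appears to serve two purposes: the chaining value
 * for the next call is saved into iv up front, and every ciphertext
 * block is read before the plaintext that would overwrite it is
 * stored, so the routine also works in place.
 */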

static inline uint8x16_t
aes_neon_xts_update(uint8x16_t t8)
{
	const int32x4_t zero = vdupq_n_s32(0);
	const int32x4_t carry = {0x87, 1, 1, 1};
	int32x4_t t, t_;
	uint32x4_t mask;

	t = vreinterpretq_s32_u8(t8);
	mask = vcltq_s32(t, zero);		/* -1 if high bit set else 0 */
	mask = vextq_u32(mask, mask, 3);	/* rotate quarters */
	t_ = vsliq_n_s32(zero, t, 1);		/* shift */
	t_ ^= carry & mask;

	return vreinterpretq_u8_s32(t_);
}
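
/*
 * aes_neon_xts_update computes the next XTS tweak: the current tweak is
 * treated as a 128-bit polynomial over GF(2) and multiplied by x,
 * reduced modulo x^128 + x^7 + x^2 + x + 1 (hence the 0x87 constant).
 * The vector code shifts each 32-bit lane left by one and injects each
 * lane's carry-out into the next lane, with lane 3's carry wrapping
 * around into lane 0 as 0x87.  A minimal scalar sketch of the same
 * update, assuming the little-endian byte order of IEEE P1619 tweaks,
 * purely for illustration (the helper name is hypothetical and this is
 * not part of the build):
 *
 *	void
 *	xts_update_sketch(uint8_t t[16])
 *	{
 *		unsigned i, carry = 0, msb;
 *
 *		for (i = 0; i < 16; i++) {
 *			msb = t[i] >> 7;
 *			t[i] = (uint8_t)((t[i] << 1) | carry);
 *			carry = msb;
 *		}
 *		if (carry)
 *			t[0] ^= 0x87;
 *	}
 */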

static int
aes_neon_xts_update_selftest(void)
{
	static const struct {
		uint32_t in[4], out[4];
	} cases[] = {
		[0] = { {1}, {2} },
		[1] = { {0x80000000U,0,0,0}, {0,1,0,0} },
		[2] = { {0,0x80000000U,0,0}, {0,0,1,0} },
		[3] = { {0,0,0x80000000U,0}, {0,0,0,1} },
		[4] = { {0,0,0,0x80000000U}, {0x87,0,0,0} },
		[5] = { {0,0x80000000U,0,0x80000000U}, {0x87,0,1,0} },
	};
	unsigned i;
	uint32_t t[4];
	int result = 0;

	for (i = 0; i < sizeof(cases)/sizeof(cases[0]); i++) {
		t[0] = cases[i].in[0];
		t[1] = cases[i].in[1];
		t[2] = cases[i].in[2];
		t[3] = cases[i].in[3];
		storeblock(t, aes_neon_xts_update(loadblock(t)));
		if (t[0] != cases[i].out[0] ||
		    t[1] != cases[i].out[1] ||
		    t[2] != cases[i].out[2] ||
		    t[3] != cases[i].out[3]) {
			printf("%s %u:"
			    " %"PRIx32" %"PRIx32" %"PRIx32" %"PRIx32"\n",
			    __func__, i, t[0], t[1], t[2], t[3]);
			result = -1;
		}
	}

	return result;
}

void
aes_neon_xts_enc(const struct aesenc *enc, const uint8_t in[static 16],
    uint8_t out[static 16], size_t nbytes, uint8_t tweak[static 16],
    uint32_t nrounds)
{
	uint8x16_t t, b;

	KASSERT(nbytes);
	KASSERT(nbytes % 16 == 0);

	t = loadblock(tweak);
	if (nbytes % 32) {
		KASSERT(nbytes % 32 == 16);
		b = t ^ loadblock(in);
		b = aes_neon_enc1(enc, b, nrounds);
		storeblock(out, t ^ b);
		t = aes_neon_xts_update(t);
		nbytes -= 16;
		in += 16;
		out += 16;
	}
	for (; nbytes; nbytes -= 32, in += 32, out += 32) {
		uint8x16_t t1;
		uint8x16x2_t b2;

		t1 = aes_neon_xts_update(t);
		b2.val[0] = t ^ loadblock(in);
		b2.val[1] = t1 ^ loadblock(in + 16);
		b2 = aes_neon_enc2(enc, b2, nrounds);
		storeblock(out, b2.val[0] ^ t);
		storeblock(out + 16, b2.val[1] ^ t1);

		t = aes_neon_xts_update(t1);
	}
	storeblock(tweak, t);
}
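
/*
 * Note on aes_neon_xts_enc above (aes_neon_xts_dec below mirrors it):
 * if the length is an odd number of blocks, one block is handled first
 * with aes_neon_enc1 and the tweak is advanced; the main loop then
 * processes two blocks at a time through aes_neon_enc2, using the
 * current tweak t for the first block and the advanced tweak t1 for
 * the second, and advances the tweak once more for the next iteration.
 */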

void
aes_neon_xts_dec(const struct aesdec *dec, const uint8_t in[static 16],
    uint8_t out[static 16], size_t nbytes, uint8_t tweak[static 16],
    uint32_t nrounds)
{
	uint8x16_t t, b;

	KASSERT(nbytes);
	KASSERT(nbytes % 16 == 0);

	t = loadblock(tweak);
	if (nbytes % 32) {
		KASSERT(nbytes % 32 == 16);
		b = t ^ loadblock(in);
		b = aes_neon_dec1(dec, b, nrounds);
		storeblock(out, t ^ b);
		t = aes_neon_xts_update(t);
		nbytes -= 16;
		in += 16;
		out += 16;
	}
	for (; nbytes; nbytes -= 32, in += 32, out += 32) {
		uint8x16_t t1;
		uint8x16x2_t b2;

		t1 = aes_neon_xts_update(t);
		b2.val[0] = t ^ loadblock(in);
		b2.val[1] = t1 ^ loadblock(in + 16);
		b2 = aes_neon_dec2(dec, b2, nrounds);
		storeblock(out, b2.val[0] ^ t);
		storeblock(out + 16, b2.val[1] ^ t1);

		t = aes_neon_xts_update(t1);
	}
	storeblock(tweak, t);
}

void
aes_neon_cbcmac_update1(const struct aesenc *enc, const uint8_t in[static 16],
    size_t nbytes, uint8_t auth0[static 16], uint32_t nrounds)
{
	uint8x16_t auth;

	KASSERT(nbytes);
	KASSERT(nbytes % 16 == 0);

	auth = loadblock(auth0);
	for (; nbytes; nbytes -= 16, in += 16)
		auth = aes_neon_enc1(enc, auth ^ loadblock(in), nrounds);
	storeblock(auth0, auth);
}
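
/*
 * Note on aes_neon_cbcmac_update1 above: like CBC encryption, the
 * CBC-MAC chain is strictly serial, so each block goes through
 * aes_neon_enc1 on its own.  The CCM routines below recover some
 * parallelism by pairing each CBC-MAC block computation with an
 * independent CTR block in a single aes_neon_enc2 call.
 */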

/*
 * XXX On aarch64, we have enough registers that we should be able to
 * pipeline two simultaneous vpaes computations in an `aes_neon_enc2'
 * function, which should substantially improve CCM throughput.
 */

#if _BYTE_ORDER == _LITTLE_ENDIAN
#define	vbetoh32q_u8	vrev32q_u8
#define	vhtobe32q_u8	vrev32q_u8
#elif _BYTE_ORDER == _BIG_ENDIAN
#define	vbetoh32q_u8(x)	(x)
#define	vhtobe32q_u8(x)	(x)
#else
#error what kind of endian are you anyway
#endif
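
/*
 * The CCM counter block in authctr[16..31] stores its 32-bit words in
 * big-endian byte order.  These macros byte-swap each 32-bit lane
 * between that stored order and host order (a no-op on big-endian
 * hosts) so the counter can be incremented with vaddq_u32.
 */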

void
aes_neon_ccm_enc1(const struct aesenc *enc, const uint8_t in[static 16],
    uint8_t out[static 16], size_t nbytes, uint8_t authctr[static 32],
    uint32_t nrounds)
{
	const uint32x4_t ctr32_inc = {0, 0, 0, 1};
	uint8x16_t auth, ptxt, ctr_be;
	uint32x4_t ctr;

	KASSERT(nbytes);
	KASSERT(nbytes % 16 == 0);

	auth = loadblock(authctr);
	ctr_be = loadblock(authctr + 16);
	ctr = vreinterpretq_u32_u8(vbetoh32q_u8(ctr_be));
	for (; nbytes; nbytes -= 16, in += 16, out += 16) {
		uint8x16x2_t b2;
		ptxt = loadblock(in);
		ctr = vaddq_u32(ctr, ctr32_inc);
		ctr_be = vhtobe32q_u8(vreinterpretq_u8_u32(ctr));

		b2.val[0] = auth ^ ptxt;
		b2.val[1] = ctr_be;
		b2 = aes_neon_enc2(enc, b2, nrounds);
		auth = b2.val[0];
		storeblock(out, ptxt ^ b2.val[1]);
	}
	storeblock(authctr, auth);
	storeblock(authctr + 16, ctr_be);
}
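
/*
 * Note on aes_neon_ccm_enc1 above: each iteration feeds two independent
 * blocks through aes_neon_enc2 at once: val[0] is the CBC-MAC state
 * XORed with the plaintext block, and val[1] is the incremented CTR
 * block, whose encryption serves as the keystream pad XORed with the
 * plaintext to produce the ciphertext.  Only the low 32 bits of the
 * counter are incremented here, presumably enough for the block counts
 * CCM callers pass in.
 */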

void
aes_neon_ccm_dec1(const struct aesenc *enc, const uint8_t in[static 16],
    uint8_t out[static 16], size_t nbytes, uint8_t authctr[static 32],
    uint32_t nrounds)
{
	const uint32x4_t ctr32_inc = {0, 0, 0, 1};
	uint8x16_t auth, ctr_be, ptxt, pad;
	uint32x4_t ctr;

	KASSERT(nbytes);
	KASSERT(nbytes % 16 == 0);

	ctr_be = loadblock(authctr + 16);
	ctr = vreinterpretq_u32_u8(vbetoh32q_u8(ctr_be));
	ctr = vaddq_u32(ctr, ctr32_inc);
	ctr_be = vhtobe32q_u8(vreinterpretq_u8_u32(ctr));
	pad = aes_neon_enc1(enc, ctr_be, nrounds);
	auth = loadblock(authctr);
	for (;; in += 16, out += 16) {
		uint8x16x2_t b2;

		ptxt = loadblock(in) ^ pad;
		auth ^= ptxt;
		storeblock(out, ptxt);

		if ((nbytes -= 16) == 0)
			break;

		ctr = vaddq_u32(ctr, ctr32_inc);
		ctr_be = vhtobe32q_u8(vreinterpretq_u8_u32(ctr));
		b2.val[0] = auth;
		b2.val[1] = ctr_be;
		b2 = aes_neon_enc2(enc, b2, nrounds);
		auth = b2.val[0];
		pad = b2.val[1];
	}
	auth = aes_neon_enc1(enc, auth, nrounds);
	storeblock(authctr, auth);
	storeblock(authctr + 16, ctr_be);
}
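
/*
 * Note on aes_neon_ccm_dec1 above: the CBC-MAC input for a block is the
 * plaintext, which is only available after that block has been
 * decrypted, so the first keystream pad is computed up front with
 * aes_neon_enc1.  Inside the loop, the MAC update for the block just
 * decrypted and the keystream pad for the next block are independent
 * and are paired in one aes_neon_enc2 call; the final MAC update is
 * finished with aes_neon_enc1 after the loop.
 */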

int
aes_neon_selftest(void)
{

	if (aes_neon_xts_update_selftest())
		return -1;

	return 0;
}