/*
 * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: aes_sse2_dec.c,v 1.1 2020/06/29 23:47:54 riastradh Exp $");

#include <sys/types.h>

#include "aes_sse2_impl.h"

/* see inner.h */
void
aes_sse2_bitslice_invSbox(__m128i q[static 4])
{
	/*
	 * See br_aes_ct_bitslice_invSbox(). This is the natural
	 * extension to 128-bit SSE2 registers, each of which carries
	 * two of the eight 64-bit bitslice words.
	 */
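	/*
	 * Why this works (cf. br_aes_ct_bitslice_invSbox() in BearSSL):
	 * the forward S-box factors as S(x) = A(I(x)) ^ 0x63, where I()
	 * is inversion in GF(2^8) and A() is linear over GF(2).  Writing
	 * B() for the inverse of A(), and using that I() is an
	 * involution:
	 *
	 *	iS(y) = I(B(y ^ 0x63)) = B(S(B(y ^ 0x63)) ^ 0x63)
	 *
	 * So the inverse S-box is the affine step B(. ^ 0x63), then the
	 * forward S-box, then the same affine step again -- which is
	 * exactly the two identical blocks around
	 * aes_sse2_bitslice_Sbox() below.  The XOR with 0x63 shows up as
	 * the NOTs on bitslice words 0, 1, 5, and 6 (the set bits of
	 * 0x63); _mm_shuffle_epi32(x, 0x0e) brings the high 64-bit lane
	 * of x down so all eight words can be combined in the low lanes,
	 * and _mm_unpacklo_epi64() packs the results back into pairs.
	 */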
	__m128i q0, q1, q2, q3, q4, q5, q6, q7;

	q0 = ~q[0];
	q1 = ~q[1];
	q2 = q[2];
	q3 = q[3];
	q4 = _mm_shuffle_epi32(q[0], 0x0e);
	q5 = _mm_shuffle_epi32(~q[1], 0x0e);
	q6 = _mm_shuffle_epi32(~q[2], 0x0e);
	q7 = _mm_shuffle_epi32(q[3], 0x0e);

	q[3] = _mm_unpacklo_epi64(q5 ^ q0 ^ q2, q1 ^ q4 ^ q6);
	q[2] = _mm_unpacklo_epi64(q4 ^ q7 ^ q1, q0 ^ q3 ^ q5);
	q[1] = _mm_unpacklo_epi64(q3 ^ q6 ^ q0, q7 ^ q2 ^ q4);
	q[0] = _mm_unpacklo_epi64(q2 ^ q5 ^ q7, q6 ^ q1 ^ q3);

	aes_sse2_bitslice_Sbox(q);

	q0 = ~q[0];
	q1 = ~q[1];
	q2 = q[2];
	q3 = q[3];
	q4 = _mm_shuffle_epi32(q[0], 0x0e);
	q5 = _mm_shuffle_epi32(~q[1], 0x0e);
	q6 = _mm_shuffle_epi32(~q[2], 0x0e);
	q7 = _mm_shuffle_epi32(q[3], 0x0e);

	q[3] = _mm_unpacklo_epi64(q5 ^ q0 ^ q2, q1 ^ q4 ^ q6);
	q[2] = _mm_unpacklo_epi64(q4 ^ q7 ^ q1, q0 ^ q3 ^ q5);
	q[1] = _mm_unpacklo_epi64(q3 ^ q6 ^ q0, q7 ^ q2 ^ q4);
	q[0] = _mm_unpacklo_epi64(q2 ^ q5 ^ q7, q6 ^ q1 ^ q3);
}

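/*
 * The expanded key sk[] stores eight 64-bit bitslice words per round.
 * Word i of a round key goes into the low 64-bit lane of q[i] and word
 * i + 4 into the high lane (_mm_set_epi64x takes the high half first),
 * so adding a round key is just four XORs in the bitsliced domain.
 */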
static inline void
add_round_key(__m128i q[static 4], const uint64_t sk[static 8])
{
	q[0] ^= _mm_set_epi64x(sk[4], sk[0]);
	q[1] ^= _mm_set_epi64x(sk[5], sk[1]);
	q[2] ^= _mm_set_epi64x(sk[6], sk[2]);
	q[3] ^= _mm_set_epi64x(sk[7], sk[3]);
}

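/*
 * Within each 64-bit bitslice word, row r of the AES state occupies
 * bits 16r..16r+15, four bits per column.  InvShiftRows rotates row r
 * right by r columns, i.e. each 16-bit row group is rotated up by 4r
 * bits; the masks and shifts below do this in both lanes at once.
 */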
static inline __m128i
inv_shift_row(__m128i q)
{
	__m128i x, y0, y1, y2, y3, y4, y5, y6;

	x = q;
	y0 = x & _mm_set1_epi64x(0x000000000000FFFF);	/* row 0: fixed */
	y1 = x & _mm_set1_epi64x(0x000000000FFF0000);	/* row 1 */
	y2 = x & _mm_set1_epi64x(0x00000000F0000000);
	y3 = x & _mm_set1_epi64x(0x000000FF00000000);	/* row 2 */
	y4 = x & _mm_set1_epi64x(0x0000FF0000000000);
	y5 = x & _mm_set1_epi64x(0x000F000000000000);	/* row 3 */
	y6 = x & _mm_set1_epi64x(0xFFF0000000000000);
	y1 = _mm_slli_epi64(y1, 4);	/* rotate row 1 by 4 bits */
	y2 = _mm_srli_epi64(y2, 12);
	y3 = _mm_slli_epi64(y3, 8);	/* rotate row 2 by 8 bits */
	y4 = _mm_srli_epi64(y4, 8);
	y5 = _mm_slli_epi64(y5, 12);	/* rotate row 3 by 12 bits */
	y6 = _mm_srli_epi64(y6, 4);
	return y0 | y1 | y2 | y3 | y4 | y5 | y6;
}

static inline void
inv_shift_rows(__m128i q[static 4])
{

	q[0] = inv_shift_row(q[0]);
	q[1] = inv_shift_row(q[1]);
	q[2] = inv_shift_row(q[2]);
	q[3] = inv_shift_row(q[3]);
}

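/*
 * Rotate each 64-bit lane by 32 bits, i.e. swap its 32-bit halves.  In
 * the row layout above this rotates the state by two rows; it is used
 * by inv_mix_columns() below.
 */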
static inline __m128i
rotr32(__m128i x)
{
	return _mm_slli_epi64(x, 32) | _mm_srli_epi64(x, 32);
}

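/*
 * InvMixColumns multiplies each state column by the fixed polynomial
 * {0b}x^3 + {0d}x^2 + {09}x + {0e} over GF(2^8) (FIPS 197, sec. 5.3.3).
 * In the bitsliced domain the byte rotations within a column become
 * 16-bit rotations of each word: the r words are the q words rotated by
 * one row, and rotr32() rotates by two rows.  The per-bit formulas for
 * s0..s7 below match the ones BearSSL derives for the br_aes_ct64
 * inverse MixColumns.
 */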
static inline void
inv_mix_columns(__m128i q[4])
{
	__m128i q0, q1, q2, q3, q4, q5, q6, q7;
	__m128i r0, r1, r2, r3, r4, r5, r6, r7;
	__m128i s0, s1, s2, s3, s4, s5, s6, s7;

	q0 = q[0];
	q1 = q[1];
	q2 = q[2];
	q3 = q[3];
	r0 = _mm_srli_epi64(q0, 16) | _mm_slli_epi64(q0, 48);
	r1 = _mm_srli_epi64(q1, 16) | _mm_slli_epi64(q1, 48);
	r2 = _mm_srli_epi64(q2, 16) | _mm_slli_epi64(q2, 48);
	r3 = _mm_srli_epi64(q3, 16) | _mm_slli_epi64(q3, 48);

	q7 = _mm_shuffle_epi32(q3, 0x0e);
	q6 = _mm_shuffle_epi32(q2, 0x0e);
	q5 = _mm_shuffle_epi32(q1, 0x0e);
	q4 = _mm_shuffle_epi32(q0, 0x0e);

	r7 = _mm_shuffle_epi32(r3, 0x0e);
	r6 = _mm_shuffle_epi32(r2, 0x0e);
	r5 = _mm_shuffle_epi32(r1, 0x0e);
	r4 = _mm_shuffle_epi32(r0, 0x0e);

	s0 = q5 ^ q6 ^ q7 ^ r0 ^ r5 ^ r7 ^ rotr32(q0 ^ q5 ^ q6 ^ r0 ^ r5);
	s1 = q0 ^ q5 ^ r0 ^ r1 ^ r5 ^ r6 ^ r7
	    ^ rotr32(q1 ^ q5 ^ q7 ^ r1 ^ r5 ^ r6);
	s2 = q0 ^ q1 ^ q6 ^ r1 ^ r2 ^ r6 ^ r7
	    ^ rotr32(q0 ^ q2 ^ q6 ^ r2 ^ r6 ^ r7);
	s3 = q0 ^ q1 ^ q2 ^ q5 ^ q6 ^ r0 ^ r2 ^ r3 ^ r5
	    ^ rotr32(q0 ^ q1 ^ q3 ^ q5 ^ q6 ^ q7 ^ r0 ^ r3 ^ r5 ^ r7);
	s4 = q1 ^ q2 ^ q3 ^ q5 ^ r1 ^ r3 ^ r4 ^ r5 ^ r6 ^ r7
	    ^ rotr32(q1 ^ q2 ^ q4 ^ q5 ^ q7 ^ r1 ^ r4 ^ r5 ^ r6);
	s5 = q2 ^ q3 ^ q4 ^ q6 ^ r2 ^ r4 ^ r5 ^ r6 ^ r7
	    ^ rotr32(q2 ^ q3 ^ q5 ^ q6 ^ r2 ^ r5 ^ r6 ^ r7);
	s6 = q3 ^ q4 ^ q5 ^ q7 ^ r3 ^ r5 ^ r6 ^ r7
	    ^ rotr32(q3 ^ q4 ^ q6 ^ q7 ^ r3 ^ r6 ^ r7);
	s7 = q4 ^ q5 ^ q6 ^ r4 ^ r6 ^ r7 ^ rotr32(q4 ^ q5 ^ q7 ^ r4 ^ r7);

	q[0] = _mm_unpacklo_epi64(s0, s4);
	q[1] = _mm_unpacklo_epi64(s1, s5);
	q[2] = _mm_unpacklo_epi64(s2, s6);
	q[3] = _mm_unpacklo_epi64(s3, s7);
}

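/*
 * This is the straightforward inverse cipher of FIPS 197, sec. 5.3, in
 * bitsliced form: AddRoundKey with the last round key, then for each
 * middle round InvShiftRows, InvSubBytes, AddRoundKey, InvMixColumns,
 * and a final round that skips InvMixColumns.  skey holds the bitsliced
 * round keys, eight 64-bit words per round; the caller is responsible
 * for converting blocks to and from the bitsliced representation.
 */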
/* see inner.h */
void
aes_sse2_bitslice_decrypt(unsigned num_rounds,
	const uint64_t *skey, __m128i q[static 4])
{
	unsigned u;

	add_round_key(q, skey + (num_rounds << 3));	/* last round key */
	for (u = num_rounds - 1; u > 0; u --) {
		inv_shift_rows(q);
		aes_sse2_bitslice_invSbox(q);
		add_round_key(q, skey + (u << 3));
		inv_mix_columns(q);
	}
	inv_shift_rows(q);
	aes_sse2_bitslice_invSbox(q);
	add_round_key(q, skey);		/* round 0 */
}

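/*
 * Illustrative sketch only, not part of this file: how a caller might
 * drive aes_sse2_bitslice_decrypt() for one AES-128 block.  The
 * conversion helpers named here (aes_sse2_interleave_in() and
 * aes_sse2_ortho(), with matching inverses) are assumptions modelled on
 * BearSSL's br_aes_ct64 helpers; see aes_sse2_impl.h and aes_sse2.c for
 * the actual entry points.
 *
 *	uint64_t w[8];		// eight 64-bit bitslice words
 *	__m128i q[4];
 *	unsigned i;
 *
 *	// bytes -> bitsliced: interleave, then orthogonalize
 *	// (hypothetical helper names)
 *	aes_sse2_interleave_in(&w[0], &w[4], block);
 *	aes_sse2_ortho(w);
 *	for (i = 0; i < 4; i++)		// pack words i and i + 4
 *		q[i] = _mm_set_epi64x(w[i + 4], w[i]);
 *
 *	aes_sse2_bitslice_decrypt(10, skey, q);	// 10 rounds for AES-128
 *
 *	// then unpack q, undo the ortho and interleave, and store
 *	// the plaintext block
 */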
    179