path: root/libavutil/aes.c
blob: 397ea773898b14d0aee3f09d6b0318b1b9396ddd
/*
 * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 *
 * some optimization ideas from aes128.c by Reimar Doeffinger
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "common.h"
#include "aes.h"
#include "aes_internal.h"
#include "intreadwrite.h"
#include "timer.h"

const int av_aes_size = sizeof(AVAES);

struct AVAES *av_aes_alloc(void)
{
    return av_mallocz(sizeof(struct AVAES));
}

static const uint8_t rcon[10] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};

static uint8_t     sbox[256];
static uint8_t inv_sbox[256];
#if CONFIG_SMALL
static uint32_t enc_multbl[1][256];
static uint32_t dec_multbl[1][256];
#else
static uint32_t enc_multbl[4][256];
static uint32_t dec_multbl[4][256];
#endif

#if HAVE_BIGENDIAN
# define ROT(x, s) (((x) >> (s)) | ((x) << (32-(s))))
#else
# define ROT(x, s) (((x) << (s)) | ((x) >> (32-(s))))
#endif

static inline void addkey(av_aes_block *dst, const av_aes_block *src,
                          const av_aes_block *round_key)
{
    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
}

static inline void addkey_s(av_aes_block *dst, const uint8_t *src,
                            const av_aes_block *round_key)
{
    dst->u64[0] = AV_RN64(src)     ^ round_key->u64[0];
    dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1];
}

static inline void addkey_d(uint8_t *dst, const av_aes_block *src,
                            const av_aes_block *round_key)
{
    AV_WN64(dst,     src->u64[0] ^ round_key->u64[0]);
    AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]);
}

static void subshift(av_aes_block s0[2], int s, const uint8_t *box)
{
    av_aes_block *s1 = (av_aes_block *) (s0[0].u8 - s);
    av_aes_block *s3 = (av_aes_block *) (s0[0].u8 + s);

    s0[0].u8[ 0] = box[s0[1].u8[ 0]];
    s0[0].u8[ 4] = box[s0[1].u8[ 4]];
    s0[0].u8[ 8] = box[s0[1].u8[ 8]];
    s0[0].u8[12] = box[s0[1].u8[12]];
    s1[0].u8[ 3] = box[s1[1].u8[ 7]];
    s1[0].u8[ 7] = box[s1[1].u8[11]];
    s1[0].u8[11] = box[s1[1].u8[15]];
    s1[0].u8[15] = box[s1[1].u8[ 3]];
    s0[0].u8[ 2] = box[s0[1].u8[10]];
    s0[0].u8[10] = box[s0[1].u8[ 2]];
    s0[0].u8[ 6] = box[s0[1].u8[14]];
    s0[0].u8[14] = box[s0[1].u8[ 6]];
    s3[0].u8[ 1] = box[s3[1].u8[13]];
    s3[0].u8[13] = box[s3[1].u8[ 9]];
    s3[0].u8[ 9] = box[s3[1].u8[ 5]];
    s3[0].u8[ 5] = box[s3[1].u8[ 1]];
}
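
/* For reference: in the encryption direction (s == 2, box == sbox) the
 * transform above is a fused SubBytes + ShiftRows that reads block [1]
 * and writes block [0].  A minimal, unoptimized sketch of the same mapping,
 * assuming the usual FIPS-197 layout where byte 4*c + r of the block holds
 * state row r, column c:
 *
 *     for (int r = 0; r < 4; r++)
 *         for (int c = 0; c < 4; c++)
 *             out[4 * c + r] = sbox[in[4 * ((c + r) & 3) + r]];
 *
 * The s1/s3 pointer offsets let the decryption direction (s == 0,
 * box == inv_sbox) reuse the same store pattern with the inverse row
 * rotation instead.
 */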

static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d)
{
#if CONFIG_SMALL
    return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24);
#else
    return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d];
#endif
}

static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, int s3)
{
    uint8_t (*src)[4] = state[1].u8x4;
    state[0].u32[0] = mix_core(multbl, src[0][0], src[s1    ][1], src[2][2], src[s3    ][3]);
    state[0].u32[1] = mix_core(multbl, src[1][0], src[s3 - 1][1], src[3][2], src[s1 - 1][3]);
    state[0].u32[2] = mix_core(multbl, src[2][0], src[s3    ][1], src[0][2], src[s1    ][3]);
    state[0].u32[3] = mix_core(multbl, src[3][0], src[s1 - 1][1], src[1][2], src[s3 - 1][3]);
}

static inline void aes_crypt(AVAES *a, int s, const uint8_t *sbox,
                             uint32_t multbl[][256])
{
    int r;

    for (r = a->rounds - 1; r > 0; r--) {
        mix(a->state, multbl, 3 - s, 1 + s);
        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
    }

    subshift(&a->state[0], s, sbox);
}

static void aes_encrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        if (iv)
            addkey_s(&a->state[1], iv, &a->state[1]);
        aes_crypt(a, 2, sbox, enc_multbl);
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        if (iv)
            memcpy(iv, dst, 16);
        src += 16;
        dst += 16;
    }
}

static void aes_decrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        aes_crypt(a, 0, inv_sbox, dec_multbl);
        if (iv) {
            addkey_s(&a->state[0], iv, &a->state[0]);
            memcpy(iv, src, 16);
        }
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        src += 16;
        dst += 16;
    }
}

void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                  int count, uint8_t *iv, int decrypt)
{
    a->crypt(a, dst, src, count, iv, a->rounds);
}
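
/* A minimal usage sketch of the public API (not part of this file):
 * encrypting two 16-byte blocks in CBC mode with a 128-bit key.  Error
 * handling is omitted and the key/iv/input values are placeholders.
 *
 *     #include "libavutil/aes.h"
 *     #include "libavutil/mem.h"
 *
 *     uint8_t key[16] = { 0 }, iv[16] = { 0 };
 *     uint8_t in[32]  = { 0 }, out[32];
 *     struct AVAES *aes = av_aes_alloc();
 *
 *     av_aes_init(aes, key, 128, 0);         // 0 = set up for encryption
 *     av_aes_crypt(aes, out, in, 2, iv, 0);  // count is in 16-byte blocks
 *     av_free(aes);
 *
 * Passing iv == NULL gives plain ECB; a non-NULL iv selects CBC and is
 * updated in place, so consecutive calls chain correctly.
 */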

static void init_multbl2(uint32_t tbl[][256], const int c[4],
                         const uint8_t *log8, const uint8_t *alog8,
                         const uint8_t *sbox)
{
    int i;

    for (i = 0; i < 256; i++) {
        int x = sbox[i];
        if (x) {
            int k, l, m, n;
            x = log8[x];
            k = alog8[x + log8[c[0]]];
            l = alog8[x + log8[c[1]]];
            m = alog8[x + log8[c[2]]];
            n = alog8[x + log8[c[3]]];
            tbl[0][i] = AV_NE(MKBETAG(k, l, m, n), MKTAG(k, l, m, n));
#if !CONFIG_SMALL
            tbl[1][i] = ROT(tbl[0][i], 8);
            tbl[2][i] = ROT(tbl[0][i], 16);
            tbl[3][i] = ROT(tbl[0][i], 24);
#endif
        }
    }
}
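
/* The tables built above fold SubBytes together with the (inverse)
 * MixColumns column constants c[].  log8/alog8 are discrete log / antilog
 * tables over GF(2^8) with the AES polynomial x^8 + x^4 + x^3 + x + 1
 * (0x11B), so alog8[log8[a] + log8[b]] is the field product a*b for
 * nonzero a and b (alog8 is 512 entries long, so the index sum never needs
 * a reduction).  A minimal bitwise sketch of the same field multiplication,
 * for reference only:
 *
 *     static uint8_t gf_mul(uint8_t a, uint8_t b)
 *     {
 *         uint8_t p = 0;
 *         while (b) {
 *             if (b & 1)
 *                 p ^= a;                              // add (XOR) the current multiple
 *             a = (a << 1) ^ ((a & 0x80) ? 0x1B : 0);  // xtime: multiply by x mod 0x11B
 *             b >>= 1;
 *         }
 *         return p;
 *     }
 */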

// this is based on the reference AES code by Paulo Barreto and Vincent Rijmen
int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt)
{
    int i, j, t, rconpointer = 0;
    uint8_t tk[8][4];
    int KC = key_bits >> 5;
    int rounds = KC + 6;
    uint8_t log8[256];
    uint8_t alog8[512];

    a->crypt = decrypt ? aes_decrypt : aes_encrypt;

    /* build the S-boxes and multiplication tables on first use; the last
     * enc_multbl entry is nonzero once the tables have been filled in */
    if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl) - 1][FF_ARRAY_ELEMS(enc_multbl[0]) - 1]) {
        j = 1;
        for (i = 0; i < 255; i++) {
            alog8[i] = alog8[i + 255] = j;
            log8[j]  = i;
            j ^= j + j;
            if (j > 255)
                j ^= 0x11B;
        }
        for (i = 0; i < 256; i++) {
            j = i ? alog8[255 - log8[i]] : 0;
            j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4);
            j  = (j ^ (j >> 8) ^ 99) & 255;
            inv_sbox[j] = i;
            sbox[i]     = j;
        }
        init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb },
                     log8, alog8, inv_sbox);
        init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 },
                     log8, alog8, sbox);
    }

    if (key_bits != 128 && key_bits != 192 && key_bits != 256)
        return AVERROR(EINVAL);

    a->rounds = rounds;

    memcpy(tk, key, KC * 4);
    memcpy(a->round_key[0].u8, key, KC * 4);

    for (t = KC * 4; t < (rounds + 1) * 16; t += KC * 4) {
        for (i = 0; i < 4; i++)
            tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]];
        tk[0][0] ^= rcon[rconpointer++];

        for (j = 1; j < KC; j++) {
            if (KC != 8 || j != KC >> 1)
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= tk[j - 1][i];
            else
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= sbox[tk[j - 1][i]];
        }

        memcpy(a->round_key[0].u8 + t, tk, KC * 4);
    }

    if (decrypt) {
        /* run the middle round keys through the decode tables, folding the
         * inverse MixColumns into them (equivalent inverse cipher) */
        for (i = 1; i < rounds; i++) {
            av_aes_block tmp[3];
            tmp[2] = a->round_key[i];
            subshift(&tmp[1], 0, sbox);
            mix(tmp, dec_multbl, 1, 3);
            a->round_key[i] = tmp[0];
        }
    } else {
        /* the encryption path consumes round keys from index rounds down
         * to 0, so store the schedule in reverse order */
        for (i = 0; i < (rounds + 1) >> 1; i++)
            FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds - i]);
    }

    return 0;
}
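
/* A minimal self-check sketch (not part of this file), using the AES-128
 * known-answer vector from FIPS-197 Appendix C.1:
 *
 *     static int aes_selftest(void)
 *     {
 *         static const uint8_t key[16] = {
 *             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
 *             0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
 *         };
 *         static const uint8_t pt[16] = {
 *             0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
 *             0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
 *         };
 *         static const uint8_t expected[16] = {
 *             0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
 *             0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a
 *         };
 *         uint8_t ct[16];
 *         AVAES aes = { 0 };
 *
 *         av_aes_init(&aes, key, 128, 0);
 *         av_aes_crypt(&aes, ct, pt, 1, NULL, 0);   // one block, ECB
 *         return memcmp(ct, expected, 16) ? -1 : 0;
 *     }
 */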