/*
 * CTR: Counter mode
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct crypto_ctr_ctx {
	struct crypto_cipher *child;
};

struct crypto_rfc3686_ctx {
	struct crypto_skcipher *child;
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
};

struct crypto_rfc3686_req_ctx {
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};

static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
			     CRYPTO_TFM_RES_MASK);

	return err;
}
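
/*
 * Note on the flag round-trip in crypto_ctr_setkey() above: request
 * flags such as CRYPTO_TFM_REQ_WEAK_KEY are forwarded from the parent
 * tfm to the child cipher before the key is set, and any result flags
 * (CRYPTO_TFM_RES_*) raised by the child are copied back so users of
 * the template can see why a key was rejected.
 */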

static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[bsize + alignmask];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, bsize);
}
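
/*
 * Worked example for crypto_ctr_crypt_final(): with a 16-byte block
 * cipher and a 5-byte tail, the counter block is encrypted once into
 * an aligned stack buffer and only the live bytes are used:
 *
 *	keystream = E_K(ctrblk);		(full 16-byte block)
 *	dst[0..4] = src[0..4] ^ keystream[0..4];
 *	crypto_inc(ctrblk, 16);			(big-endian increment)
 *
 * The remaining 11 keystream bytes are discarded, which is what lets
 * the "ctr" instance advertise cra_blocksize == 1 and behave as a
 * stream cipher.
 */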

static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[bsize + alignmask];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

static int crypto_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	unsigned int bsize = crypto_cipher_blocksize(child);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, child);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, child);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, child);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
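
/*
 * crypto_ctr_crypt() above follows the usual blkcipher walk pattern:
 * the walk hands back virtually mapped chunks of at least bsize bytes,
 * full blocks take the in-place or out-of-place fast path, and
 * blkcipher_walk_done() is told how many bytes are still unprocessed
 * so it can advance the scatterlists. Any sub-block tail left over
 * (0 < walk.nbytes < bsize) is finished by crypto_ctr_crypt_final().
 */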

static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	return 0;
}

static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}

static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
			      CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
		goto out_put_alg;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if (alg->cra_blocksize % 4)
		goto out_put_alg;

	inst = crypto_alloc_instance("ctr", alg);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);

	inst->alg.cra_init = crypto_ctr_init_tfm;
	inst->alg.cra_exit = crypto_ctr_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;

	inst->alg.cra_blkcipher.geniv = "chainiv";

out:
	crypto_mod_put(alg);
	return inst;

out_put_alg:
	inst = ERR_PTR(err);
	goto out;
}

static void crypto_ctr_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_ctr_tmpl = {
	.name = "ctr",
	.alloc = crypto_ctr_alloc,
	.free = crypto_ctr_free,
	.module = THIS_MODULE,
};
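
/*
 * Minimal usage sketch for the "ctr" template (hypothetical caller,
 * error handling trimmed; assumes AES is available and uses the
 * generic skcipher API front end):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, 16);
 *	...	(the IV is the full 16-byte initial counter block)
 *	crypto_free_skcipher(tfm);
 */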

static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
				 const u8 *key, unsigned int keylen)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	/* the nonce is stored at the end of the key */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}
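
/*
 * Key layout example (hypothetical AES-128 user): setkey is passed
 * 16 + CTR_RFC3686_NONCE_SIZE = 20 bytes, laid out as
 *
 *	[ 16-byte AES key | 4-byte nonce ]
 *
 * The nonce is peeled off into ctx->nonce and only the first 16 bytes
 * reach the child "ctr(aes)" setkey.
 */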

static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	unsigned long align = crypto_skcipher_alignmask(tfm);
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
	struct skcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, iv);

	return crypto_skcipher_encrypt(subreq);
}
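
/*
 * The 16-byte counter block assembled in crypto_rfc3686_crypt() has
 * the layout mandated by RFC 3686:
 *
 *	bytes  0..3	nonce (saved at setkey time)
 *	bytes  4..11	per-request IV (req->iv)
 *	bytes 12..15	block counter, big-endian, starting at 1
 *
 * Encrypt and decrypt both call crypto_skcipher_encrypt() on the
 * child, since the two directions of CTR are identical.
 */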

static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;
	unsigned long align;
	unsigned int reqsize;

	cipher = crypto_spawn_skcipher2(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	align = crypto_skcipher_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
		  crypto_skcipher_reqsize(cipher);
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}
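
/*
 * The request size computed above reserves room for alignment padding,
 * the rfc3686 request context (IV buffer plus subrequest header) and,
 * behind it, the child's own request context, so the whole chain fits
 * in the single skcipher_request allocated by the caller.
 */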

static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void crypto_rfc3686_free(struct skcipher_instance *inst)
{
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(spawn);
	kfree(inst);
}

static int crypto_rfc3686_create(struct crypto_template *tmpl,
				 struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	struct crypto_skcipher_spawn *spawn;
	const char *cipher_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher2(spawn, cipher_name, 0,
				    crypto_requires_sync(algt->type,
							 algt->mask));
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(spawn);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
		goto err_drop_spawn;

	/* Not a stream cipher? */
	if (alg->base.cra_blocksize != 1)
		goto err_drop_spawn;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = 1;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;

	inst->alg.setkey = crypto_rfc3686_setkey;
	inst->alg.encrypt = crypto_rfc3686_crypt;
	inst->alg.decrypt = crypto_rfc3686_crypt;

	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.init = crypto_rfc3686_init_tfm;
	inst->alg.exit = crypto_rfc3686_exit_tfm;

	inst->free = crypto_rfc3686_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_rfc3686_tmpl = {
	.name = "rfc3686",
	.create = crypto_rfc3686_create,
	.module = THIS_MODULE,
};
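
/*
 * Instantiation sketch (hypothetical caller): a name such as
 * "rfc3686(ctr(aes))" resolves through this template. Compared to the
 * inner CTR instance, the key grows by the 4-byte nonce and the
 * per-request IV shrinks to 8 bytes:
 *
 *	tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, 16 + CTR_RFC3686_NONCE_SIZE);
 *	...	(req->iv now points at an 8-byte per-packet IV)
 */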

static int __init crypto_ctr_module_init(void)
{
	int err;

	err = crypto_register_template(&crypto_ctr_tmpl);
	if (err)
		goto out;

	err = crypto_register_template(&crypto_rfc3686_tmpl);
	if (err)
		goto out_drop_ctr;

out:
	return err;

out_drop_ctr:
	crypto_unregister_template(&crypto_ctr_tmpl);
	goto out;
}

static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_template(&crypto_rfc3686_tmpl);
	crypto_unregister_template(&crypto_ctr_tmpl);
}

module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR Counter block mode");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");