/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

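/*
 * MCRYPTD_MAX_CPU_QLEN caps each per-CPU request queue; MCRYPTD_BATCH is
 * how many requests the queue worker completes before it reschedules
 * itself (unless it is the only task running).
 */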
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail);

struct mcryptd_flush_list {
        struct list_head list;
        struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
        struct crypto_ahash_spawn spawn;
        struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

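/*
 * mcryptd_arm_flusher - make sure a delayed flush is pending for @cstate
 * @cstate: per-CPU state of a multibuffer algorithm
 * @delay: flush deadline, in jiffies from now
 *
 * If no flusher is engaged yet, put @cstate on this CPU's flush list and
 * queue its delayed flush work, so partially filled lanes are not held
 * back indefinitely.
 */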
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
        struct mcryptd_flush_list *flist;

        if (!cstate->flusher_engaged) {
                /* put the flusher on the flush list */
                flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
                mutex_lock(&flist->lock);
                list_add_tail(&cstate->flush_list, &flist->list);
                cstate->flusher_engaged = true;
                cstate->next_flush = jiffies + delay;
                queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
                                      &cstate->flush, delay);
                mutex_unlock(&flist->lock);
        }
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

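/* Set up the per-CPU request queues and their worker; called once at init. */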
static int mcryptd_init_queue(struct mcryptd_queue *queue,
                              unsigned int max_cpu_qlen)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
        pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
                spin_lock_init(&cpu_queue->q_lock);
        }
        return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

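/*
 * Queue a request on the current CPU's queue and kick that CPU's worker.
 * The submitting CPU is recorded in the request's tag so later processing
 * can stay on the same CPU.
 */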
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
                                   struct crypto_async_request *request,
                                   struct mcryptd_hash_request_ctx *rctx)
{
        int cpu, err;
        struct mcryptd_cpu_queue *cpu_queue;

        cpu_queue = raw_cpu_ptr(queue->cpu_queue);
        spin_lock(&cpu_queue->q_lock);
        cpu = smp_processor_id();
        rctx->tag.cpu = cpu;

        err = crypto_enqueue_request(&cpu_queue->queue, request);
        pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
                 cpu, cpu_queue, request);
        spin_unlock(&cpu_queue->q_lock);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

        return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if the
 * crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
        struct mcryptd_flush_list *flist;
        struct mcryptd_alg_cstate *cstate;

        flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
        while (single_task_running()) {
                mutex_lock(&flist->lock);
                cstate = list_first_entry_or_null(&flist->list,
                                struct mcryptd_alg_cstate, flush_list);
                if (!cstate || !cstate->flusher_engaged) {
                        mutex_unlock(&flist->lock);
                        return;
                }
                list_del(&cstate->flush_list);
                cstate->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                cstate->alg_state->flusher(cstate);
        }
}

/*
 * Called in workqueue context: do one piece of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
        struct mcryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;
        int i;

        /*
         * Need to loop through more than once for multi-buffer to
         * be effective.
         */

        cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
        i = 0;
        while (i < MCRYPTD_BATCH || single_task_running()) {

                spin_lock_bh(&cpu_queue->q_lock);
                backlog = crypto_get_backlog(&cpu_queue->queue);
                req = crypto_dequeue_request(&cpu_queue->queue);
                spin_unlock_bh(&cpu_queue->q_lock);

                if (!req) {
                        mcryptd_opportunistic_flush();
                        return;
                }

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
                req->complete(req, 0);
                if (!cpu_queue->queue.qlen)
                        return;
                ++i;
        }
        if (cpu_queue->queue.qlen)
                queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}

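/*
 * mcryptd_flusher - delayed work that flushes an engaged per-CPU state
 * @__work: delayed work embedded in the mcryptd_alg_cstate
 *
 * Runs when the deadline armed by mcryptd_arm_flusher() expires: take
 * the state off the flush list and call the algorithm's flusher.
 */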
void mcryptd_flusher(struct work_struct *__work)
{
        struct mcryptd_alg_cstate *alg_cpu_state;
        struct mcryptd_alg_state *alg_state;
        struct mcryptd_flush_list *flist;
        int cpu;

        cpu = smp_processor_id();
        alg_cpu_state = container_of(to_delayed_work(__work),
                                     struct mcryptd_alg_cstate, flush);
        alg_state = alg_cpu_state->alg_state;
        if (alg_cpu_state->cpu != cpu)
                pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
                         cpu, alg_cpu_state->cpu);

        if (alg_cpu_state->flusher_engaged) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                mutex_lock(&flist->lock);
                list_del(&alg_cpu_state->flush_list);
                alg_cpu_state->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                alg_state->flusher(alg_cpu_state);
        }
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

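/*
 * Allocate a crypto instance with @head bytes in front of it and @tail
 * bytes of context behind it, naming it "mcryptd(<driver-name>)" and
 * bumping the priority above the wrapped algorithm's.
 */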
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

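/*
 * Pick up the CRYPTO_ALG_INTERNAL bits from the template attributes.
 * Returns true only when the caller explicitly asked for an internal
 * algorithm, which is the only kind mcryptd may wrap.
 */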
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
                                          u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return false;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;

        return !!(*type & *mask & CRYPTO_ALG_INTERNAL);
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_ahash_spawn *spawn = &ictx->spawn;
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *hash;

        hash = crypto_spawn_ahash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mcryptd_hash_request_ctx) +
                                 crypto_ahash_reqsize(hash));
        return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(ctx->child);
}

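/*
 * Forward setkey to the child ahash, mirroring the request flags into
 * the child and the result flags back out to the parent.
 */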
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
                               const u8 *key, unsigned int keylen)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_ahash *child = ctx->child;
        int err;

        crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

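/*
 * Save the caller's completion in the request context and substitute
 * @complete, which will perform the real operation from the workqueue
 * before invoking the saved completion.
 */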
static int mcryptd_hash_enqueue(struct ahash_request *req,
                                crypto_completion_t complete)
{
        int ret;

        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct mcryptd_queue *queue =
                mcryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        ret = mcryptd_enqueue_request(queue, &req->base, rctx);

        return ret;
}

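/*
 * The mcryptd_hash_* handlers below run in workqueue context as the
 * substituted completions.  An err of -EINPROGRESS merely reports that
 * a backlogged request was accepted, so the operation itself is skipped
 * and the notification is passed straight to the original completion.
 */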
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req_async);

        rctx->out = req->result;
        err = crypto_ahash_init(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_update(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_final(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        rctx->out = req->result;
        err = ahash_mcryptd_finup(&rctx->areq);

        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req_async);

        rctx->out = req->result;
        err = ahash_mcryptd_digest(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_import(&rctx->areq, in);
}

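/*
 * Build an "mcryptd(<alg>)" ahash instance: allocate it, spawn the
 * wrapped algorithm, and install the enqueueing entry points in front
 * of it.
 */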
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                               struct mcryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct hash_alg_common *halg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        if (!mcryptd_check_internal(tb, &type, &mask))
                return -EINVAL;

        halg = ahash_attr_alg(tb[1], type, mask);
        if (IS_ERR(halg))
                return PTR_ERR(halg);

        alg = &halg->base;
        pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
        inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
                                      sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_ahash_spawn(&ctx->spawn, halg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
                (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
                                   CRYPTO_ALG_OPTIONAL_KEY));

        inst->alg.halg.digestsize = halg->digestsize;
        inst->alg.halg.statesize = halg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

        inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

        inst->alg.init = mcryptd_hash_init_enqueue;
        inst->alg.update = mcryptd_hash_update_enqueue;
        inst->alg.final = mcryptd_hash_final_enqueue;
        inst->alg.finup = mcryptd_hash_finup_enqueue;
        inst->alg.export = mcryptd_hash_export;
        inst->alg.import = mcryptd_hash_import;
        if (crypto_hash_alg_has_setkey(halg))
                inst->alg.setkey = mcryptd_hash_setkey;
        inst->alg.digest = mcryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_ahash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_DIGEST:
                return mcryptd_create_hash(tmpl, tb, &mqueue);
        }

        return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
        struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_ahash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template mcryptd_tmpl = {
        .name = "mcryptd",
        .create = mcryptd_create,
        .free = mcryptd_free,
        .module = THIS_MODULE,
};

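/*
 * mcryptd_alloc_ahash - allocate an mcryptd-wrapped ahash handle
 * @alg_name: the algorithm to wrap
 * @type: algorithm type flags
 * @mask: algorithm mask flags
 *
 * Builds the "mcryptd(<alg_name>)" name, allocates it, and checks that
 * the resulting tfm is really backed by this module.  A caller would
 * typically do something like (a sketch; the algorithm name is only an
 * example):
 *
 *      tfm = mcryptd_alloc_ahash("__sha1-mb", CRYPTO_ALG_INTERNAL,
 *                                CRYPTO_ALG_INTERNAL);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 */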
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
                                          u32 type, u32 mask)
{
        char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

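/*
 * Thin pass-throughs used by the workqueue handlers above; any data
 * alignment is left to the multi-buffer algorithm itself.
 */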
int ahash_mcryptd_digest(struct ahash_request *desc)
{
        return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
        /* alignment is to be done by the multi-buffer crypto algorithm if needed */

        return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
        /* alignment is to be done by the multi-buffer crypto algorithm if needed */

        return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
        /* alignment is to be done by the multi-buffer crypto algorithm if needed */

        return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
        int err, cpu;
        struct mcryptd_flush_list *flist;

        mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
        if (!mcryptd_flist)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                INIT_LIST_HEAD(&flist->list);
                mutex_init(&flist->lock);
        }

        err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
        if (err) {
                free_percpu(mcryptd_flist);
                return err;
        }

        err = crypto_register_template(&mcryptd_tmpl);
        if (err) {
                mcryptd_fini_queue(&mqueue);
                free_percpu(mcryptd_flist);
        }

        return err;
}

static void __exit mcryptd_exit(void)
{
        mcryptd_fini_queue(&mqueue);
        crypto_unregister_template(&mcryptd_tmpl);
        free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");