/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include "internal.h"

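/*
 * Maximum number of requests the engine queue will hold; once it is full,
 * new requests are only accepted (as backlog) if they carry the
 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
 */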
#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			pr_err("failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			pr_err("failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		pr_err("failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
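
/*
 * Illustrative sketch, not part of the framework: the contract for a
 * driver's ->cipher_one_request() callback invoked above (the hash
 * callback works the same way). The my_dev/my_hw_start_dma names are
 * hypothetical. The callback only kicks off the transfer and returns 0;
 * the result is reported later, typically from an IRQ handler, through
 * crypto_finalize_cipher_request().
 *
 *	static int my_cipher_one_request(struct crypto_engine *engine,
 *					 struct ablkcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_engine(engine);
 *
 *		dd->cur_req = req;
 *		my_hw_start_dma(dd, req->src, req->dst, req->nbytes);
 *
 *		return 0;	// completion is signalled asynchronously
 *	}
 */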

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: if true, schedule the request pump after enqueueing
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and schedule the request pump
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
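
/*
 * Illustrative sketch with hypothetical my_* names: an ablkcipher
 * .encrypt handler typically just hands the request over to the engine
 * and propagates the queue status (-EINPROGRESS or -EBUSY) back to the
 * caller:
 *
 *	static int my_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_get(req);
 *
 *		return crypto_transfer_cipher_request_to_engine(dd->engine,
 *								req);
 *	}
 */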

/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: if true, schedule the request pump after enqueueing
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and schedule the request pump
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
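
/*
 * Illustrative sketch with hypothetical my_* names: a driver's completion
 * IRQ handler reports the result back to the engine, which unprepares the
 * request, calls its completion callback and pumps the next queued
 * request:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *		int err = my_hw_status_ok(dd) ? 0 : -EIO;
 *
 *		crypto_finalize_cipher_request(dd->engine, dd->cur_req, err);
 *
 *		return IRQ_HANDLED;
 *	}
 */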

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so the queued requests can be pumped and completed.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	kthread_init_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
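
/*
 * Illustrative sketch with hypothetical my_* names: typical probe-time
 * setup. The driver allocates the engine, installs its callbacks and
 * starts it; afterwards requests are fed in through the
 * crypto_transfer_*_request_to_engine() helpers:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = my_dev_alloc(pdev);
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		dd->engine->prepare_cipher_request = my_prepare_request;
 *		dd->engine->cipher_one_request = my_cipher_one_request;
 *
 *		return crypto_engine_start(dd->engine);
 *	}
 */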

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 on success, else the error returned by crypto_engine_stop().
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_flush_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
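
/*
 * Illustrative sketch with hypothetical my_* names: the matching teardown.
 * crypto_engine_exit() drains the queue via crypto_engine_stop() and stops
 * the pump kthread; the engine structure itself is devm-allocated and is
 * released together with the device:
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 */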

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");