path: root/block/blk-mq.c (plain)
blob: 24fc09cf7f175adc5e2bb3f616798a39a39292e8
1/*
2 * Block multiqueue core code
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 * Copyright (C) 2013-2014 Christoph Hellwig
6 */
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/backing-dev.h>
10#include <linux/bio.h>
11#include <linux/blkdev.h>
12#include <linux/kmemleak.h>
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/workqueue.h>
17#include <linux/smp.h>
18#include <linux/llist.h>
19#include <linux/list_sort.h>
20#include <linux/cpu.h>
21#include <linux/cache.h>
22#include <linux/sched/sysctl.h>
23#include <linux/delay.h>
24#include <linux/crash_dump.h>
25#include <linux/prefetch.h>
26
27#include <trace/events/block.h>
28
29#include <linux/blk-mq.h>
30#include "blk.h"
31#include "blk-mq.h"
32#include "blk-mq-tag.h"
33
34static DEFINE_MUTEX(all_q_mutex);
35static LIST_HEAD(all_q_list);
36
37/*
38 * Check if any of the ctxs have pending work in this hardware queue
39 */
40static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41{
42 return sbitmap_any_bit_set(&hctx->ctx_map);
43}
44
45/*
46 * Mark this ctx as having pending work in this hardware queue
47 */
48static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
49 struct blk_mq_ctx *ctx)
50{
51 if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
52 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
53}
54
55static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
56 struct blk_mq_ctx *ctx)
57{
58 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
59}
60
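/*
 * Start freezing the queue: bump the freeze depth and, on the first
 * freeze, kill the percpu q_usage_counter and run the hardware queues
 * so that already-queued requests get a chance to drain.
 */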
61void blk_mq_freeze_queue_start(struct request_queue *q)
62{
63 int freeze_depth;
64
65 freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
66 if (freeze_depth == 1) {
67 percpu_ref_kill(&q->q_usage_counter);
68 blk_mq_run_hw_queues(q, false);
69 }
70}
71EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
72
73static void blk_mq_freeze_queue_wait(struct request_queue *q)
74{
75 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
76}
77
78/*
79 * Guarantee no request is in use, so we can change any data structure of
80 * the queue afterward.
81 */
82void blk_freeze_queue(struct request_queue *q)
83{
84 /*
85 * In the !blk_mq case we are only calling this to kill the
86 * q_usage_counter, otherwise this increases the freeze depth
87 * and waits for it to return to zero. For this reason there is
88 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
89 * exported to drivers as the only user for unfreeze is blk_mq.
90 */
91 blk_mq_freeze_queue_start(q);
92 blk_mq_freeze_queue_wait(q);
93}
94
95void blk_mq_freeze_queue(struct request_queue *q)
96{
97 /*
98 * ...just an alias to keep freeze and unfreeze actions balanced
99 * in the blk_mq_* namespace
100 */
101 blk_freeze_queue(q);
102}
103EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
104
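/*
 * Drop one freeze reference; once the depth hits zero, revive the
 * percpu q_usage_counter and wake anyone waiting on mq_freeze_wq.
 */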
105void blk_mq_unfreeze_queue(struct request_queue *q)
106{
107 int freeze_depth;
108
109 freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
110 WARN_ON_ONCE(freeze_depth < 0);
111 if (!freeze_depth) {
112 percpu_ref_reinit(&q->q_usage_counter);
113 wake_up_all(&q->mq_freeze_wq);
114 }
115}
116EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
117
118void blk_mq_wake_waiters(struct request_queue *q)
119{
120 struct blk_mq_hw_ctx *hctx;
121 unsigned int i;
122
123 queue_for_each_hw_ctx(q, hctx, i)
124 if (blk_mq_hw_queue_mapped(hctx))
125 blk_mq_tag_wakeup_all(hctx->tags, true);
126
127 /*
128 * If we are called because the queue has now been marked as
129 * dying, we need to ensure that processes currently waiting on
130 * the queue are notified as well.
131 */
132 wake_up_all(&q->mq_freeze_wq);
133}
134
135bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
136{
137 return blk_mq_has_free_tags(hctx->tags);
138}
139EXPORT_SYMBOL(blk_mq_can_queue);
140
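/*
 * Initialize a request freshly taken from the tag map. Everything but
 * the tag (set by the caller) and the atomic flags is reset here.
 */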
141static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
142 struct request *rq, int op,
143 unsigned int op_flags)
144{
145 if (blk_queue_io_stat(q))
146 op_flags |= REQ_IO_STAT;
147
148 INIT_LIST_HEAD(&rq->queuelist);
149 /* csd/requeue_work/fifo_time is initialized before use */
150 rq->q = q;
151 rq->mq_ctx = ctx;
152 req_set_op_attrs(rq, op, op_flags);
153 /* do not touch atomic flags, it needs atomic ops against the timer */
154 rq->cpu = -1;
155 INIT_HLIST_NODE(&rq->hash);
156 RB_CLEAR_NODE(&rq->rb_node);
157 rq->rq_disk = NULL;
158 rq->part = NULL;
159 rq->start_time = jiffies;
160#ifdef CONFIG_BLK_CGROUP
161 rq->rl = NULL;
162 set_start_time_ns(rq);
163 rq->io_start_time_ns = 0;
164#endif
165 rq->nr_phys_segments = 0;
166#if defined(CONFIG_BLK_DEV_INTEGRITY)
167 rq->nr_integrity_segments = 0;
168#endif
169 rq->special = NULL;
170 /* tag was already set */
171 rq->errors = 0;
172
173 rq->cmd = rq->__cmd;
174
175 rq->extra_len = 0;
176 rq->sense_len = 0;
177 rq->resid_len = 0;
178 rq->sense = NULL;
179
180 INIT_LIST_HEAD(&rq->timeout_list);
181 rq->timeout = 0;
182
183 rq->end_io = NULL;
184 rq->end_io_data = NULL;
185 rq->next_rq = NULL;
186
187 ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
188}
189
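/*
 * Grab a free tag from the hardware context and turn the preallocated
 * request behind it into an initialized request. Returns NULL if the
 * tag space is exhausted (BLK_MQ_TAG_FAIL).
 */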
190static struct request *
191__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
192{
193 struct request *rq;
194 unsigned int tag;
195
196 tag = blk_mq_get_tag(data);
197 if (tag != BLK_MQ_TAG_FAIL) {
198 rq = data->hctx->tags->rqs[tag];
199
200 if (blk_mq_tag_busy(data->hctx)) {
201 rq->cmd_flags = REQ_MQ_INFLIGHT;
202 atomic_inc(&data->hctx->nr_active);
203 }
204
205 rq->tag = tag;
206 blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
207 return rq;
208 }
209
210 return NULL;
211}
212
213struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
214 unsigned int flags)
215{
216 struct blk_mq_ctx *ctx;
217 struct blk_mq_hw_ctx *hctx;
218 struct request *rq;
219 struct blk_mq_alloc_data alloc_data;
220 int ret;
221
222 ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
223 if (ret)
224 return ERR_PTR(ret);
225
226 ctx = blk_mq_get_ctx(q);
227 hctx = blk_mq_map_queue(q, ctx->cpu);
228 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
229 rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
230 blk_mq_put_ctx(ctx);
231
232 if (!rq) {
233 blk_queue_exit(q);
234 return ERR_PTR(-EWOULDBLOCK);
235 }
236
237 rq->__data_len = 0;
238 rq->__sector = (sector_t) -1;
239 rq->bio = rq->biotail = NULL;
240 return rq;
241}
242EXPORT_SYMBOL(blk_mq_alloc_request);
243
244struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
245 unsigned int flags, unsigned int hctx_idx)
246{
247 struct blk_mq_hw_ctx *hctx;
248 struct blk_mq_ctx *ctx;
249 struct request *rq;
250 struct blk_mq_alloc_data alloc_data;
251 int ret;
252
253 /*
254 * If the tag allocator sleeps we could get an allocation for a
255 * different hardware context. No need to complicate the low-level
256 * allocator for the rare use case of a command tied to a
257 * specific queue.
258 */
259 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
260 return ERR_PTR(-EINVAL);
261
262 if (hctx_idx >= q->nr_hw_queues)
263 return ERR_PTR(-EIO);
264
265 ret = blk_queue_enter(q, true);
266 if (ret)
267 return ERR_PTR(ret);
268
269 /*
270 * Check if the hardware context is actually mapped to anything.
271 * If not, tell the caller that it should skip this queue.
272 */
273 hctx = q->queue_hw_ctx[hctx_idx];
274 if (!blk_mq_hw_queue_mapped(hctx)) {
275 ret = -EXDEV;
276 goto out_queue_exit;
277 }
278 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
279
280 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
281 rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
282 if (!rq) {
283 ret = -EWOULDBLOCK;
284 goto out_queue_exit;
285 }
286
287 return rq;
288
289out_queue_exit:
290 blk_queue_exit(q);
291 return ERR_PTR(ret);
292}
293EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
294
295static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
296 struct blk_mq_ctx *ctx, struct request *rq)
297{
298 const int tag = rq->tag;
299 struct request_queue *q = rq->q;
300
301 if (rq->cmd_flags & REQ_MQ_INFLIGHT)
302 atomic_dec(&hctx->nr_active);
303 rq->cmd_flags = 0;
304
305 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
306 blk_mq_put_tag(hctx, ctx, tag);
307 blk_queue_exit(q);
308}
309
310void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
311{
312 struct blk_mq_ctx *ctx = rq->mq_ctx;
313
314 ctx->rq_completed[rq_is_sync(rq)]++;
315 __blk_mq_free_request(hctx, ctx, rq);
316
317}
318EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
319
320void blk_mq_free_request(struct request *rq)
321{
322 blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
323}
324EXPORT_SYMBOL_GPL(blk_mq_free_request);
325
326inline void __blk_mq_end_request(struct request *rq, int error)
327{
328 blk_account_io_done(rq);
329
330 if (rq->end_io) {
331 rq->end_io(rq, error);
332 } else {
333 if (unlikely(blk_bidi_rq(rq)))
334 blk_mq_free_request(rq->next_rq);
335 blk_mq_free_request(rq);
336 }
337}
338EXPORT_SYMBOL(__blk_mq_end_request);
339
340void blk_mq_end_request(struct request *rq, int error)
341{
342 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
343 BUG();
344 __blk_mq_end_request(rq, error);
345}
346EXPORT_SYMBOL(blk_mq_end_request);
347
348static void __blk_mq_complete_request_remote(void *data)
349{
350 struct request *rq = data;
351
352 rq->q->softirq_done_fn(rq);
353}
354
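/*
 * Complete the request on the CPU that submitted it, using an async
 * IPI. If QUEUE_FLAG_SAME_COMP isn't set, or the two CPUs share a
 * cache (and QUEUE_FLAG_SAME_FORCE isn't set), complete locally and
 * skip the IPI.
 */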
355static void blk_mq_ipi_complete_request(struct request *rq)
356{
357 struct blk_mq_ctx *ctx = rq->mq_ctx;
358 bool shared = false;
359 int cpu;
360
361 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
362 rq->q->softirq_done_fn(rq);
363 return;
364 }
365
366 cpu = get_cpu();
367 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
368 shared = cpus_share_cache(cpu, ctx->cpu);
369
370 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
371 rq->csd.func = __blk_mq_complete_request_remote;
372 rq->csd.info = rq;
373 rq->csd.flags = 0;
374 smp_call_function_single_async(ctx->cpu, &rq->csd);
375 } else {
376 rq->q->softirq_done_fn(rq);
377 }
378 put_cpu();
379}
380
381static void __blk_mq_complete_request(struct request *rq)
382{
383 struct request_queue *q = rq->q;
384
385 if (!q->softirq_done_fn)
386 blk_mq_end_request(rq, rq->errors);
387 else
388 blk_mq_ipi_complete_request(rq);
389}
390
391/**
392 * blk_mq_complete_request - end I/O on a request
393 * @rq: the request being processed
394 *
395 * Description:
396 * Ends all I/O on a request. It does not handle partial completions.
397 * The actual completion happens out-of-order, through an IPI handler.
398 */
399void blk_mq_complete_request(struct request *rq, int error)
400{
401 struct request_queue *q = rq->q;
402
403 if (unlikely(blk_should_fake_timeout(q)))
404 return;
405 if (!blk_mark_rq_complete(rq)) {
406 rq->errors = error;
407 __blk_mq_complete_request(rq);
408 }
409}
410EXPORT_SYMBOL(blk_mq_complete_request);
411
412int blk_mq_request_started(struct request *rq)
413{
414 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
415}
416EXPORT_SYMBOL_GPL(blk_mq_request_started);
417
418void blk_mq_start_request(struct request *rq)
419{
420 struct request_queue *q = rq->q;
421
422 trace_block_rq_issue(q, rq);
423
424 rq->resid_len = blk_rq_bytes(rq);
425 if (unlikely(blk_bidi_rq(rq)))
426 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
427
428 blk_add_timer(rq);
429
430 /*
431 * Ensure that ->deadline is visible before we set the started
432 * flag and clear the completed flag.
433 */
434 smp_mb__before_atomic();
435
436 /*
437 * Mark us as started and clear complete. Complete might have been
438 * set if requeue raced with timeout, which then marked it as
439 * complete. So be sure to clear complete again when we start
440 * the request, otherwise we'll ignore the completion event.
441 */
442 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
443 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
444 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
445 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
446
447 if (q->dma_drain_size && blk_rq_bytes(rq)) {
448 /*
449 * Make sure space for the drain appears. We know we can do
450 * this because max_hw_segments has been adjusted to be one
451 * fewer than the device can handle.
452 */
453 rq->nr_phys_segments++;
454 }
455}
456EXPORT_SYMBOL(blk_mq_start_request);
457
458static void __blk_mq_requeue_request(struct request *rq)
459{
460 struct request_queue *q = rq->q;
461
462 trace_block_rq_requeue(q, rq);
463
464 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
465 if (q->dma_drain_size && blk_rq_bytes(rq))
466 rq->nr_phys_segments--;
467 }
468}
469
470void blk_mq_requeue_request(struct request *rq)
471{
472 __blk_mq_requeue_request(rq);
473
474 BUG_ON(blk_queued_rq(rq));
475 blk_mq_add_to_requeue_list(rq, true);
476}
477EXPORT_SYMBOL(blk_mq_requeue_request);
478
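/*
 * Run the requeue list in two passes: first re-insert the requests
 * marked REQ_SOFTBARRIER at the head of their queues, then insert the
 * remainder at the tail, and finally restart the hardware queues.
 */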
479static void blk_mq_requeue_work(struct work_struct *work)
480{
481 struct request_queue *q =
482 container_of(work, struct request_queue, requeue_work.work);
483 LIST_HEAD(rq_list);
484 struct request *rq, *next;
485 unsigned long flags;
486
487 spin_lock_irqsave(&q->requeue_lock, flags);
488 list_splice_init(&q->requeue_list, &rq_list);
489 spin_unlock_irqrestore(&q->requeue_lock, flags);
490
491 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
492 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
493 continue;
494
495 rq->cmd_flags &= ~REQ_SOFTBARRIER;
496 list_del_init(&rq->queuelist);
497 blk_mq_insert_request(rq, true, false, false);
498 }
499
500 while (!list_empty(&rq_list)) {
501 rq = list_entry(rq_list.next, struct request, queuelist);
502 list_del_init(&rq->queuelist);
503 blk_mq_insert_request(rq, false, false, false);
504 }
505
506 /*
507 * Use the start variant of queue running here, so that running
508 * the requeue work will kick stopped queues.
509 */
510 blk_mq_start_hw_queues(q);
511}
512
513void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
514{
515 struct request_queue *q = rq->q;
516 unsigned long flags;
517
518 /*
519 * We abuse this flag that is otherwise used by the I/O scheduler to
520 * request head insertion from the workqueue.
521 */
522 BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
523
524 spin_lock_irqsave(&q->requeue_lock, flags);
525 if (at_head) {
526 rq->cmd_flags |= REQ_SOFTBARRIER;
527 list_add(&rq->queuelist, &q->requeue_list);
528 } else {
529 list_add_tail(&rq->queuelist, &q->requeue_list);
530 }
531 spin_unlock_irqrestore(&q->requeue_lock, flags);
532}
533EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
534
535void blk_mq_cancel_requeue_work(struct request_queue *q)
536{
537 cancel_delayed_work_sync(&q->requeue_work);
538}
539EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
540
541void blk_mq_kick_requeue_list(struct request_queue *q)
542{
543 kblockd_schedule_delayed_work(&q->requeue_work, 0);
544}
545EXPORT_SYMBOL(blk_mq_kick_requeue_list);
546
547void blk_mq_delay_kick_requeue_list(struct request_queue *q,
548 unsigned long msecs)
549{
550 kblockd_schedule_delayed_work(&q->requeue_work,
551 msecs_to_jiffies(msecs));
552}
553EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
554
555void blk_mq_abort_requeue_list(struct request_queue *q)
556{
557 unsigned long flags;
558 LIST_HEAD(rq_list);
559
560 spin_lock_irqsave(&q->requeue_lock, flags);
561 list_splice_init(&q->requeue_list, &rq_list);
562 spin_unlock_irqrestore(&q->requeue_lock, flags);
563
564 while (!list_empty(&rq_list)) {
565 struct request *rq;
566
567 rq = list_first_entry(&rq_list, struct request, queuelist);
568 list_del_init(&rq->queuelist);
569 rq->errors = -EIO;
570 blk_mq_end_request(rq, rq->errors);
571 }
572}
573EXPORT_SYMBOL(blk_mq_abort_requeue_list);
574
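/*
 * Translate a tag back to its request. Out-of-range tags (e.g. stale
 * completions from the driver) map to NULL.
 */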
575struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
576{
577 if (tag < tags->nr_tags) {
578 prefetch(tags->rqs[tag]);
579 return tags->rqs[tag];
580 }
581
582 return NULL;
583}
584EXPORT_SYMBOL(blk_mq_tag_to_rq);
585
586struct blk_mq_timeout_data {
587 unsigned long next;
588 unsigned int next_set;
589};
590
591void blk_mq_rq_timed_out(struct request *req, bool reserved)
592{
593 struct blk_mq_ops *ops = req->q->mq_ops;
594 enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
595
596 /*
597 * We know that complete is set at this point. If STARTED isn't set
598 * anymore, then the request isn't active and the "timeout" should
599 * just be ignored. This can happen due to the bitflag ordering.
600 * Timeout first checks if STARTED is set, and if it is, assumes
601 * the request is active. But if we race with completion, then
602 * both flags will get cleared. So check here again, and ignore
603 * a timeout event with a request that isn't active.
604 */
605 if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
606 return;
607
608 if (ops->timeout)
609 ret = ops->timeout(req, reserved);
610
611 switch (ret) {
612 case BLK_EH_HANDLED:
613 __blk_mq_complete_request(req);
614 break;
615 case BLK_EH_RESET_TIMER:
616 blk_add_timer(req);
617 blk_clear_rq_complete(req);
618 break;
619 case BLK_EH_NOT_HANDLED:
620 break;
621 default:
622 printk(KERN_ERR "block: bad eh return: %d\n", ret);
623 break;
624 }
625}
626
627static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
628 struct request *rq, void *priv, bool reserved)
629{
630 struct blk_mq_timeout_data *data = priv;
631
632 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
633 return;
634
635 if (time_after_eq(jiffies, rq->deadline)) {
636 if (!blk_mark_rq_complete(rq))
637 blk_mq_rq_timed_out(rq, reserved);
638 } else if (!data->next_set || time_after(data->next, rq->deadline)) {
639 data->next = rq->deadline;
640 data->next_set = 1;
641 }
642}
643
644static void blk_mq_timeout_work(struct work_struct *work)
645{
646 struct request_queue *q =
647 container_of(work, struct request_queue, timeout_work);
648 struct blk_mq_timeout_data data = {
649 .next = 0,
650 .next_set = 0,
651 };
652 int i;
653
654 /* A deadlock might occur if a request is stuck requiring a
655 * timeout at the same time a queue freeze is waiting
656 * for completion, since the timeout code would not be able to
657 * acquire the queue reference here.
658 *
659 * That's why we don't use blk_queue_enter here; instead, we use
660 * percpu_ref_tryget directly, because we need to be able to
661 * obtain a reference even in the short window between the queue
662 * starting to freeze, by dropping the first reference in
663 * blk_mq_freeze_queue_start, and the moment the last request is
664 * consumed, marked by the instant q_usage_counter reaches
665 * zero.
666 */
667 if (!percpu_ref_tryget(&q->q_usage_counter))
668 return;
669
670 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
671
672 if (data.next_set) {
673 data.next = blk_rq_timeout(round_jiffies_up(data.next));
674 mod_timer(&q->timeout, data.next);
675 } else {
676 struct blk_mq_hw_ctx *hctx;
677
678 queue_for_each_hw_ctx(q, hctx, i) {
679 /* the hctx may be unmapped, so check it here */
680 if (blk_mq_hw_queue_mapped(hctx))
681 blk_mq_tag_idle(hctx);
682 }
683 }
684 blk_queue_exit(q);
685}
686
687/*
688 * Reverse check our software queue for entries that we could potentially
689 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
690 * too much time checking for merges.
691 */
692static bool blk_mq_attempt_merge(struct request_queue *q,
693 struct blk_mq_ctx *ctx, struct bio *bio)
694{
695 struct request *rq;
696 int checked = 8;
697
698 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
699 int el_ret;
700
701 if (!checked--)
702 break;
703
704 if (!blk_rq_merge_ok(rq, bio))
705 continue;
706
707 el_ret = blk_try_merge(rq, bio);
708 if (el_ret == ELEVATOR_BACK_MERGE) {
709 if (bio_attempt_back_merge(q, rq, bio)) {
710 ctx->rq_merged++;
711 return true;
712 }
713 break;
714 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
715 if (bio_attempt_front_merge(q, rq, bio)) {
716 ctx->rq_merged++;
717 return true;
718 }
719 break;
720 }
721 }
722
723 return false;
724}
725
726struct flush_busy_ctx_data {
727 struct blk_mq_hw_ctx *hctx;
728 struct list_head *list;
729};
730
731static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
732{
733 struct flush_busy_ctx_data *flush_data = data;
734 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
735 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
736
737 sbitmap_clear_bit(sb, bitnr);
738 spin_lock(&ctx->lock);
739 list_splice_tail_init(&ctx->rq_list, flush_data->list);
740 spin_unlock(&ctx->lock);
741 return true;
742}
743
744/*
745 * Process software queues that have been marked busy, splicing them
746 * to the for-dispatch list.
747 */
748static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
749{
750 struct flush_busy_ctx_data data = {
751 .hctx = hctx,
752 .list = list,
753 };
754
755 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
756}
757
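/*
 * Bucket the dispatch batch size into a power-of-two histogram slot
 * for the hctx->dispatched[] statistics.
 */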
758static inline unsigned int queued_to_index(unsigned int queued)
759{
760 if (!queued)
761 return 0;
762
763 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
764}
765
766/*
767 * Run this hardware queue, pulling any software queues mapped to it in.
768 * Note that this function currently has various problems around ordering
769 * of IO. In particular, we'd like FIFO behaviour on handling existing
770 * items on the hctx->dispatch list. Ignore that for now.
771 */
772static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
773{
774 struct request_queue *q = hctx->queue;
775 struct request *rq;
776 LIST_HEAD(rq_list);
777 LIST_HEAD(driver_list);
778 struct list_head *dptr;
779 int queued;
780
781 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
782 return;
783
784 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
785 cpu_online(hctx->next_cpu));
786
787 hctx->run++;
788
789 /*
790 * Touch any software queue that has pending entries.
791 */
792 flush_busy_ctxs(hctx, &rq_list);
793
794 /*
795 * If we have previous entries on our dispatch list, grab them
796 * and stuff them at the front for more fair dispatch.
797 */
798 if (!list_empty_careful(&hctx->dispatch)) {
799 spin_lock(&hctx->lock);
800 if (!list_empty(&hctx->dispatch))
801 list_splice_init(&hctx->dispatch, &rq_list);
802 spin_unlock(&hctx->lock);
803 }
804
805 /*
806 * Start off with dptr being NULL, so we start the first request
807 * immediately, even if we have more pending.
808 */
809 dptr = NULL;
810
811 /*
812 * Now process all the entries, sending them to the driver.
813 */
814 queued = 0;
815 while (!list_empty(&rq_list)) {
816 struct blk_mq_queue_data bd;
817 int ret;
818
819 rq = list_first_entry(&rq_list, struct request, queuelist);
820 list_del_init(&rq->queuelist);
821
822 bd.rq = rq;
823 bd.list = dptr;
824 bd.last = list_empty(&rq_list);
825
826 ret = q->mq_ops->queue_rq(hctx, &bd);
827 switch (ret) {
828 case BLK_MQ_RQ_QUEUE_OK:
829 queued++;
830 break;
831 case BLK_MQ_RQ_QUEUE_BUSY:
832 list_add(&rq->queuelist, &rq_list);
833 __blk_mq_requeue_request(rq);
834 break;
835 default:
836 pr_err("blk-mq: bad return on queue: %d\n", ret);
837 case BLK_MQ_RQ_QUEUE_ERROR:
838 rq->errors = -EIO;
839 blk_mq_end_request(rq, rq->errors);
840 break;
841 }
842
843 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
844 break;
845
846 /*
847 * We've done the first request. If we have more than 1
848 * left in the list, set dptr to defer issue.
849 */
850 if (!dptr && rq_list.next != rq_list.prev)
851 dptr = &driver_list;
852 }
853
854 hctx->dispatched[queued_to_index(queued)]++;
855
856 /*
857 * Any items that need requeuing? Stuff them into hctx->dispatch,
858 * that is where we will continue on the next queue run.
859 */
860 if (!list_empty(&rq_list)) {
861 spin_lock(&hctx->lock);
862 list_splice(&rq_list, &hctx->dispatch);
863 spin_unlock(&hctx->lock);
864 /*
865 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
866 * it's possible the queue is stopped and restarted again
867 * before this. Queue restart will dispatch requests. And since
868 * requests in rq_list aren't added into hctx->dispatch yet,
869 * the requests in rq_list might get lost.
870 *
871 * blk_mq_run_hw_queue() already checks the STOPPED bit
872 */
873 blk_mq_run_hw_queue(hctx, true);
874 }
875}
876
877/*
878 * It'd be great if the workqueue API had a way to pass
879 * in a mask and had some smarts for more clever placement.
880 * For now we just round-robin here, switching for every
881 * BLK_MQ_CPU_WORK_BATCH queued items.
882 */
883static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
884{
885 if (hctx->queue->nr_hw_queues == 1)
886 return WORK_CPU_UNBOUND;
887
888 if (--hctx->next_cpu_batch <= 0) {
889 int next_cpu;
890
891 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
892 if (next_cpu >= nr_cpu_ids)
893 next_cpu = cpumask_first(hctx->cpumask);
894
895 hctx->next_cpu = next_cpu;
896 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
897 }
898
899 return hctx->next_cpu;
900}
901
902void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
903{
904 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
905 !blk_mq_hw_queue_mapped(hctx)))
906 return;
907
908 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
909 int cpu = get_cpu();
910 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
911 __blk_mq_run_hw_queue(hctx);
912 put_cpu();
913 return;
914 }
915
916 put_cpu();
917 }
918
919 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
920}
921
922void blk_mq_run_hw_queues(struct request_queue *q, bool async)
923{
924 struct blk_mq_hw_ctx *hctx;
925 int i;
926
927 queue_for_each_hw_ctx(q, hctx, i) {
928 if ((!blk_mq_hctx_has_pending(hctx) &&
929 list_empty_careful(&hctx->dispatch)) ||
930 test_bit(BLK_MQ_S_STOPPED, &hctx->state))
931 continue;
932
933 blk_mq_run_hw_queue(hctx, async);
934 }
935}
936EXPORT_SYMBOL(blk_mq_run_hw_queues);
937
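/*
 * Stop a hardware queue: cancel any pending run/delay work and set
 * BLK_MQ_S_STOPPED so subsequent queue runs become no-ops until the
 * queue is started again.
 */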
938void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
939{
940 cancel_work(&hctx->run_work);
941 cancel_delayed_work(&hctx->delay_work);
942 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
943}
944EXPORT_SYMBOL(blk_mq_stop_hw_queue);
945
946void blk_mq_stop_hw_queues(struct request_queue *q)
947{
948 struct blk_mq_hw_ctx *hctx;
949 int i;
950
951 queue_for_each_hw_ctx(q, hctx, i)
952 blk_mq_stop_hw_queue(hctx);
953}
954EXPORT_SYMBOL(blk_mq_stop_hw_queues);
955
956void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
957{
958 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
959
960 blk_mq_run_hw_queue(hctx, false);
961}
962EXPORT_SYMBOL(blk_mq_start_hw_queue);
963
964void blk_mq_start_hw_queues(struct request_queue *q)
965{
966 struct blk_mq_hw_ctx *hctx;
967 int i;
968
969 queue_for_each_hw_ctx(q, hctx, i)
970 blk_mq_start_hw_queue(hctx);
971}
972EXPORT_SYMBOL(blk_mq_start_hw_queues);
973
974void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
975{
976 struct blk_mq_hw_ctx *hctx;
977 int i;
978
979 queue_for_each_hw_ctx(q, hctx, i) {
980 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
981 continue;
982
983 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
984 blk_mq_run_hw_queue(hctx, async);
985 }
986}
987EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
988
989static void blk_mq_run_work_fn(struct work_struct *work)
990{
991 struct blk_mq_hw_ctx *hctx;
992
993 hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
994
995 __blk_mq_run_hw_queue(hctx);
996}
997
998static void blk_mq_delay_work_fn(struct work_struct *work)
999{
1000 struct blk_mq_hw_ctx *hctx;
1001
1002 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1003
1004 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1005 __blk_mq_run_hw_queue(hctx);
1006}
1007
1008void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1009{
1010 if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1011 return;
1012
1013 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1014 &hctx->delay_work, msecs_to_jiffies(msecs));
1015}
1016EXPORT_SYMBOL(blk_mq_delay_queue);
1017
1018static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1019 struct request *rq,
1020 bool at_head)
1021{
1022 struct blk_mq_ctx *ctx = rq->mq_ctx;
1023
1024 trace_block_rq_insert(hctx->queue, rq);
1025
1026 if (at_head)
1027 list_add(&rq->queuelist, &ctx->rq_list);
1028 else
1029 list_add_tail(&rq->queuelist, &ctx->rq_list);
1030}
1031
1032static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
1033 struct request *rq, bool at_head)
1034{
1035 struct blk_mq_ctx *ctx = rq->mq_ctx;
1036
1037 __blk_mq_insert_req_list(hctx, rq, at_head);
1038 blk_mq_hctx_mark_pending(hctx, ctx);
1039}
1040
1041void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1042 bool async)
1043{
1044 struct blk_mq_ctx *ctx = rq->mq_ctx;
1045 struct request_queue *q = rq->q;
1046 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1047
1048 spin_lock(&ctx->lock);
1049 __blk_mq_insert_request(hctx, rq, at_head);
1050 spin_unlock(&ctx->lock);
1051
1052 if (run_queue)
1053 blk_mq_run_hw_queue(hctx, async);
1054}
1055
1056static void blk_mq_insert_requests(struct request_queue *q,
1057 struct blk_mq_ctx *ctx,
1058 struct list_head *list,
1059 int depth,
1060 bool from_schedule)
1061
1062{
1063 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1064
1065 trace_block_unplug(q, depth, !from_schedule);
1066
1067 /*
1068 * Preemption doesn't flush the plug list, so it's possible that
1069 * ctx->cpu is offline now.
1070 */
1071 spin_lock(&ctx->lock);
1072 while (!list_empty(list)) {
1073 struct request *rq;
1074
1075 rq = list_first_entry(list, struct request, queuelist);
1076 BUG_ON(rq->mq_ctx != ctx);
1077 list_del_init(&rq->queuelist);
1078 __blk_mq_insert_req_list(hctx, rq, false);
1079 }
1080 blk_mq_hctx_mark_pending(hctx, ctx);
1081 spin_unlock(&ctx->lock);
1082
1083 blk_mq_run_hw_queue(hctx, from_schedule);
1084}
1085
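/*
 * Sort the plug list by software queue first and by sector within a
 * queue, so runs of requests for the same ctx get inserted together.
 */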
1086static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1087{
1088 struct request *rqa = container_of(a, struct request, queuelist);
1089 struct request *rqb = container_of(b, struct request, queuelist);
1090
1091 return !(rqa->mq_ctx < rqb->mq_ctx ||
1092 (rqa->mq_ctx == rqb->mq_ctx &&
1093 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1094}
1095
1096void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1097{
1098 struct blk_mq_ctx *this_ctx;
1099 struct request_queue *this_q;
1100 struct request *rq;
1101 LIST_HEAD(list);
1102 LIST_HEAD(ctx_list);
1103 unsigned int depth;
1104
1105 list_splice_init(&plug->mq_list, &list);
1106
1107 list_sort(NULL, &list, plug_ctx_cmp);
1108
1109 this_q = NULL;
1110 this_ctx = NULL;
1111 depth = 0;
1112
1113 while (!list_empty(&list)) {
1114 rq = list_entry_rq(list.next);
1115 list_del_init(&rq->queuelist);
1116 BUG_ON(!rq->q);
1117 if (rq->mq_ctx != this_ctx) {
1118 if (this_ctx) {
1119 blk_mq_insert_requests(this_q, this_ctx,
1120 &ctx_list, depth,
1121 from_schedule);
1122 }
1123
1124 this_ctx = rq->mq_ctx;
1125 this_q = rq->q;
1126 depth = 0;
1127 }
1128
1129 depth++;
1130 list_add_tail(&rq->queuelist, &ctx_list);
1131 }
1132
1133 /*
1134 * If 'this_ctx' is set, we know we have entries to complete
1135 * on 'ctx_list'. Do those.
1136 */
1137 if (this_ctx) {
1138 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1139 from_schedule);
1140 }
1141}
1142
1143static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1144{
1145 init_request_from_bio(rq, bio);
1146
1147 blk_account_io_start(rq, 1);
1148}
1149
1150static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1151{
1152 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1153 !blk_queue_nomerges(hctx->queue);
1154}
1155
1156static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1157 struct blk_mq_ctx *ctx,
1158 struct request *rq, struct bio *bio)
1159{
1160 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1161 blk_mq_bio_to_request(rq, bio);
1162 spin_lock(&ctx->lock);
1163insert_rq:
1164 __blk_mq_insert_request(hctx, rq, false);
1165 spin_unlock(&ctx->lock);
1166 return false;
1167 } else {
1168 struct request_queue *q = hctx->queue;
1169
1170 spin_lock(&ctx->lock);
1171 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1172 blk_mq_bio_to_request(rq, bio);
1173 goto insert_rq;
1174 }
1175
1176 spin_unlock(&ctx->lock);
1177 __blk_mq_free_request(hctx, ctx, rq);
1178 return true;
1179 }
1180}
1181
1182struct blk_map_ctx {
1183 struct blk_mq_hw_ctx *hctx;
1184 struct blk_mq_ctx *ctx;
1185};
1186
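/*
 * Map the submitting CPU to its software/hardware queue pair and
 * allocate a request for the bio from it; data->{ctx,hctx} report
 * which queues the request was allocated against.
 */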
1187static struct request *blk_mq_map_request(struct request_queue *q,
1188 struct bio *bio,
1189 struct blk_map_ctx *data)
1190{
1191 struct blk_mq_hw_ctx *hctx;
1192 struct blk_mq_ctx *ctx;
1193 struct request *rq;
1194 int op = bio_data_dir(bio);
1195 int op_flags = 0;
1196 struct blk_mq_alloc_data alloc_data;
1197
1198 blk_queue_enter_live(q);
1199 ctx = blk_mq_get_ctx(q);
1200 hctx = blk_mq_map_queue(q, ctx->cpu);
1201
1202 if (rw_is_sync(bio_op(bio), bio->bi_opf))
1203 op_flags |= REQ_SYNC;
1204
1205 trace_block_getrq(q, bio, op);
1206 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
1207 rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
1208
1209 data->hctx = alloc_data.hctx;
1210 data->ctx = alloc_data.ctx;
1211 data->hctx->queued++;
1212 return rq;
1213}
1214
1215static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
1216{
1217 int ret;
1218 struct request_queue *q = rq->q;
1219 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
1220 struct blk_mq_queue_data bd = {
1221 .rq = rq,
1222 .list = NULL,
1223 .last = 1
1224 };
1225 blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
1226
1227 /*
1228 * If the queue is OK, we are done. For a hard error, kill the
1229 * request. For any other error (busy), just add it to our list
1230 * as we previously would have done.
1231 */
1232 ret = q->mq_ops->queue_rq(hctx, &bd);
1233 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1234 *cookie = new_cookie;
1235 return 0;
1236 }
1237
1238 __blk_mq_requeue_request(rq);
1239
1240 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1241 *cookie = BLK_QC_T_NONE;
1242 rq->errors = -EIO;
1243 blk_mq_end_request(rq, rq->errors);
1244 return 0;
1245 }
1246
1247 return -1;
1248}
1249
1250/*
1251 * Multiple hardware queue variant. This will not use per-process plugs,
1252 * but will attempt to bypass the hctx queueing if we can go straight to
1253 * hardware for SYNC IO.
1254 */
1255static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1256{
1257 const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
1258 const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1259 struct blk_map_ctx data;
1260 struct request *rq;
1261 unsigned int request_count = 0;
1262 struct blk_plug *plug;
1263 struct request *same_queue_rq = NULL;
1264 blk_qc_t cookie;
1265
1266 blk_queue_bounce(q, &bio);
1267
1268 blk_queue_split(q, &bio, q->bio_split);
1269
1270 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1271 bio_io_error(bio);
1272 return BLK_QC_T_NONE;
1273 }
1274
1275 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1276 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1277 return BLK_QC_T_NONE;
1278
1279 rq = blk_mq_map_request(q, bio, &data);
1280 if (unlikely(!rq))
1281 return BLK_QC_T_NONE;
1282
1283 cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1284
1285 if (unlikely(is_flush_fua)) {
1286 blk_mq_bio_to_request(rq, bio);
1287 blk_insert_flush(rq);
1288 goto run_queue;
1289 }
1290
1291 plug = current->plug;
1292 /*
1293 * If the driver supports deferred issue based on 'last', then
1294 * queue it up like normal since we can potentially save some
1295 * CPU this way.
1296 */
1297 if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1298 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1299 struct request *old_rq = NULL;
1300
1301 blk_mq_bio_to_request(rq, bio);
1302
1303 /*
1304 * We do limited plugging. If the bio can be merged, do that.
1305 * Otherwise the existing request in the plug list will be
1306 * issued. So the plug list will have one request at most.
1307 */
1308 if (plug) {
1309 /*
1310 * The plug list might get flushed before this. If that
1311 * happens, same_queue_rq is invalid and the plug list is
1312 * empty.
1313 */
1314 if (same_queue_rq && !list_empty(&plug->mq_list)) {
1315 old_rq = same_queue_rq;
1316 list_del_init(&old_rq->queuelist);
1317 }
1318 list_add_tail(&rq->queuelist, &plug->mq_list);
1319 } else /* is_sync */
1320 old_rq = rq;
1321 blk_mq_put_ctx(data.ctx);
1322 if (!old_rq)
1323 goto done;
1324 if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
1325 blk_mq_direct_issue_request(old_rq, &cookie) != 0)
1326 blk_mq_insert_request(old_rq, false, true, true);
1327 goto done;
1328 }
1329
1330 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1331 /*
1332 * For a SYNC request, send it to the hardware immediately. For
1333 * an ASYNC request, just ensure that we run it later on. The
1334 * latter allows for merging opportunities and more efficient
1335 * dispatching.
1336 */
1337run_queue:
1338 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1339 }
1340 blk_mq_put_ctx(data.ctx);
1341done:
1342 return cookie;
1343}
1344
1345/*
1346 * Single hardware queue variant. This will attempt to use any per-process
1347 * plug for merging and IO deferral.
1348 */
1349static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1350{
1351 const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
1352 const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1353 struct blk_plug *plug;
1354 unsigned int request_count = 0;
1355 struct blk_map_ctx data;
1356 struct request *rq;
1357 blk_qc_t cookie;
1358
1359 blk_queue_bounce(q, &bio);
1360
1361 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1362 bio_io_error(bio);
1363 return BLK_QC_T_NONE;
1364 }
1365
1366 blk_queue_split(q, &bio, q->bio_split);
1367
1368 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1369 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1370 return BLK_QC_T_NONE;
1371 } else
1372 request_count = blk_plug_queued_count(q);
1373
1374 rq = blk_mq_map_request(q, bio, &data);
1375 if (unlikely(!rq))
1376 return BLK_QC_T_NONE;
1377
1378 cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1379
1380 if (unlikely(is_flush_fua)) {
1381 blk_mq_bio_to_request(rq, bio);
1382 blk_insert_flush(rq);
1383 goto run_queue;
1384 }
1385
1386 /*
1387 * If a task plug exists, use it: since this is completely lockless,
1388 * we can utilize it to temporarily store requests until the task is
1389 * either done or scheduled away.
1390 */
1391 plug = current->plug;
1392 if (plug) {
1393 blk_mq_bio_to_request(rq, bio);
1394 if (!request_count)
1395 trace_block_plug(q);
1396
1397 blk_mq_put_ctx(data.ctx);
1398
1399 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1400 blk_flush_plug_list(plug, false);
1401 trace_block_plug(q);
1402 }
1403
1404 list_add_tail(&rq->queuelist, &plug->mq_list);
1405 return cookie;
1406 }
1407
1408 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1409 /*
1410 * For a SYNC request, send it to the hardware immediately. For
1411 * an ASYNC request, just ensure that we run it later on. The
1412 * latter allows for merging opportunities and more efficient
1413 * dispatching.
1414 */
1415run_queue:
1416 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1417 }
1418
1419 blk_mq_put_ctx(data.ctx);
1420 return cookie;
1421}
1422
1423static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1424 struct blk_mq_tags *tags, unsigned int hctx_idx)
1425{
1426 struct page *page;
1427
1428 if (tags->rqs && set->ops->exit_request) {
1429 int i;
1430
1431 for (i = 0; i < tags->nr_tags; i++) {
1432 if (!tags->rqs[i])
1433 continue;
1434 set->ops->exit_request(set->driver_data, tags->rqs[i],
1435 hctx_idx, i);
1436 tags->rqs[i] = NULL;
1437 }
1438 }
1439
1440 while (!list_empty(&tags->page_list)) {
1441 page = list_first_entry(&tags->page_list, struct page, lru);
1442 list_del_init(&page->lru);
1443 /*
1444 * Remove kmemleak object previously allocated in
1445 * blk_mq_init_rq_map().
1446 */
1447 kmemleak_free(page_address(page));
1448 __free_pages(page, page->private);
1449 }
1450
1451 kfree(tags->rqs);
1452
1453 blk_mq_free_tags(tags);
1454}
1455
1456static size_t order_to_size(unsigned int order)
1457{
1458 return (size_t)PAGE_SIZE << order;
1459}
1460
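/*
 * Allocate the tag map for one hardware queue plus the backing pages
 * for its requests. Requests are carved out of high-order pages (up
 * to order 4), falling back to smaller orders when memory is
 * fragmented.
 */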
1461static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1462 unsigned int hctx_idx)
1463{
1464 struct blk_mq_tags *tags;
1465 unsigned int i, j, entries_per_page, max_order = 4;
1466 size_t rq_size, left;
1467
1468 tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1469 set->numa_node,
1470 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1471 if (!tags)
1472 return NULL;
1473
1474 INIT_LIST_HEAD(&tags->page_list);
1475
1476 tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1477 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1478 set->numa_node);
1479 if (!tags->rqs) {
1480 blk_mq_free_tags(tags);
1481 return NULL;
1482 }
1483
1484 /*
1485 * rq_size is the size of the request plus driver payload, rounded
1486 * to the cacheline size
1487 */
1488 rq_size = round_up(sizeof(struct request) + set->cmd_size,
1489 cache_line_size());
1490 left = rq_size * set->queue_depth;
1491
1492 for (i = 0; i < set->queue_depth; ) {
1493 int this_order = max_order;
1494 struct page *page;
1495 int to_do;
1496 void *p;
1497
1498 while (this_order && left < order_to_size(this_order - 1))
1499 this_order--;
1500
1501 do {
1502 page = alloc_pages_node(set->numa_node,
1503 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1504 this_order);
1505 if (page)
1506 break;
1507 if (!this_order--)
1508 break;
1509 if (order_to_size(this_order) < rq_size)
1510 break;
1511 } while (1);
1512
1513 if (!page)
1514 goto fail;
1515
1516 page->private = this_order;
1517 list_add_tail(&page->lru, &tags->page_list);
1518
1519 p = page_address(page);
1520 /*
1521 * Allow kmemleak to scan these pages as they contain pointers
1522 * to additional allocations made via ops->init_request().
1523 */
1524 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1525 entries_per_page = order_to_size(this_order) / rq_size;
1526 to_do = min(entries_per_page, set->queue_depth - i);
1527 left -= to_do * rq_size;
1528 for (j = 0; j < to_do; j++) {
1529 tags->rqs[i] = p;
1530 if (set->ops->init_request) {
1531 if (set->ops->init_request(set->driver_data,
1532 tags->rqs[i], hctx_idx, i,
1533 set->numa_node)) {
1534 tags->rqs[i] = NULL;
1535 goto fail;
1536 }
1537 }
1538
1539 p += rq_size;
1540 i++;
1541 }
1542 }
1543 return tags;
1544
1545fail:
1546 blk_mq_free_rq_map(set, tags, hctx_idx);
1547 return NULL;
1548}
1549
1550/*
1551 * 'cpu' is going away. Splice any existing rq_list entries from this
1552 * software queue to the hw queue dispatch list, and ensure that it
1553 * gets run.
1554 */
1555static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1556{
1557 struct blk_mq_hw_ctx *hctx;
1558 struct blk_mq_ctx *ctx;
1559 LIST_HEAD(tmp);
1560
1561 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1562 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1563
1564 spin_lock(&ctx->lock);
1565 if (!list_empty(&ctx->rq_list)) {
1566 list_splice_init(&ctx->rq_list, &tmp);
1567 blk_mq_hctx_clear_pending(hctx, ctx);
1568 }
1569 spin_unlock(&ctx->lock);
1570
1571 if (list_empty(&tmp))
1572 return 0;
1573
1574 spin_lock(&hctx->lock);
1575 list_splice_tail_init(&tmp, &hctx->dispatch);
1576 spin_unlock(&hctx->lock);
1577
1578 blk_mq_run_hw_queue(hctx, true);
1579 return 0;
1580}
1581
1582static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1583{
1584 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1585 &hctx->cpuhp_dead);
1586}
1587
1588/* hctx->ctxs will be freed in queue's release handler */
1589static void blk_mq_exit_hctx(struct request_queue *q,
1590 struct blk_mq_tag_set *set,
1591 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1592{
1593 unsigned flush_start_tag = set->queue_depth;
1594
1595 if (blk_mq_hw_queue_mapped(hctx))
1596 blk_mq_tag_idle(hctx);
1597
1598 if (set->ops->exit_request)
1599 set->ops->exit_request(set->driver_data,
1600 hctx->fq->flush_rq, hctx_idx,
1601 flush_start_tag + hctx_idx);
1602
1603 if (set->ops->exit_hctx)
1604 set->ops->exit_hctx(hctx, hctx_idx);
1605
1606 blk_mq_remove_cpuhp(hctx);
1607 blk_free_flush_queue(hctx->fq);
1608 sbitmap_free(&hctx->ctx_map);
1609}
1610
1611static void blk_mq_exit_hw_queues(struct request_queue *q,
1612 struct blk_mq_tag_set *set, int nr_queue)
1613{
1614 struct blk_mq_hw_ctx *hctx;
1615 unsigned int i;
1616
1617 queue_for_each_hw_ctx(q, hctx, i) {
1618 if (i == nr_queue)
1619 break;
1620 blk_mq_exit_hctx(q, set, hctx, i);
1621 }
1622}
1623
1624static void blk_mq_free_hw_queues(struct request_queue *q,
1625 struct blk_mq_tag_set *set)
1626{
1627 struct blk_mq_hw_ctx *hctx;
1628 unsigned int i;
1629
1630 queue_for_each_hw_ctx(q, hctx, i)
1631 free_cpumask_var(hctx->cpumask);
1632}
1633
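/*
 * Set up one hardware context: work items, ctx map, flush queue, and
 * the driver's per-hctx state via ->init_hctx()/->init_request().
 */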
1634static int blk_mq_init_hctx(struct request_queue *q,
1635 struct blk_mq_tag_set *set,
1636 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1637{
1638 int node;
1639 unsigned flush_start_tag = set->queue_depth;
1640
1641 node = hctx->numa_node;
1642 if (node == NUMA_NO_NODE)
1643 node = hctx->numa_node = set->numa_node;
1644
1645 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1646 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1647 spin_lock_init(&hctx->lock);
1648 INIT_LIST_HEAD(&hctx->dispatch);
1649 hctx->queue = q;
1650 hctx->queue_num = hctx_idx;
1651 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1652
1653 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1654
1655 hctx->tags = set->tags[hctx_idx];
1656
1657 /*
1658 * Allocate space for all possible cpus to avoid allocation at
1659 * runtime
1660 */
1661 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1662 GFP_KERNEL, node);
1663 if (!hctx->ctxs)
1664 goto unregister_cpu_notifier;
1665
1666 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1667 node))
1668 goto free_ctxs;
1669
1670 hctx->nr_ctx = 0;
1671
1672 if (set->ops->init_hctx &&
1673 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1674 goto free_bitmap;
1675
1676 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1677 if (!hctx->fq)
1678 goto exit_hctx;
1679
1680 if (set->ops->init_request &&
1681 set->ops->init_request(set->driver_data,
1682 hctx->fq->flush_rq, hctx_idx,
1683 flush_start_tag + hctx_idx, node))
1684 goto free_fq;
1685
1686 return 0;
1687
1688 free_fq:
1689 kfree(hctx->fq);
1690 exit_hctx:
1691 if (set->ops->exit_hctx)
1692 set->ops->exit_hctx(hctx, hctx_idx);
1693 free_bitmap:
1694 sbitmap_free(&hctx->ctx_map);
1695 free_ctxs:
1696 kfree(hctx->ctxs);
1697 unregister_cpu_notifier:
1698 blk_mq_remove_cpuhp(hctx);
1699 return -1;
1700}
1701
1702static void blk_mq_init_cpu_queues(struct request_queue *q,
1703 unsigned int nr_hw_queues)
1704{
1705 unsigned int i;
1706
1707 for_each_possible_cpu(i) {
1708 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1709 struct blk_mq_hw_ctx *hctx;
1710
1711 __ctx->cpu = i;
1712 spin_lock_init(&__ctx->lock);
1713 INIT_LIST_HEAD(&__ctx->rq_list);
1714 __ctx->queue = q;
1715
1716 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1717 if (!cpu_online(i))
1718 continue;
1719
1720 hctx = blk_mq_map_queue(q, i);
1721
1722 /*
1723 * Set local node, IFF we have more than one hw queue. If
1724 * not, we remain on the home node of the device
1725 */
1726 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1727 hctx->numa_node = local_memory_node(cpu_to_node(i));
1728 }
1729}
1730
1731static void blk_mq_map_swqueue(struct request_queue *q,
1732 const struct cpumask *online_mask)
1733{
1734 unsigned int i;
1735 struct blk_mq_hw_ctx *hctx;
1736 struct blk_mq_ctx *ctx;
1737 struct blk_mq_tag_set *set = q->tag_set;
1738
1739 /*
1740 * Avoid others reading an incomplete hctx->cpumask through sysfs
1741 */
1742 mutex_lock(&q->sysfs_lock);
1743
1744 queue_for_each_hw_ctx(q, hctx, i) {
1745 cpumask_clear(hctx->cpumask);
1746 hctx->nr_ctx = 0;
1747 }
1748
1749 /*
1750 * Map software to hardware queues
1751 */
1752 for_each_possible_cpu(i) {
1753 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1754 if (!cpumask_test_cpu(i, online_mask))
1755 continue;
1756
1757 ctx = per_cpu_ptr(q->queue_ctx, i);
1758 hctx = blk_mq_map_queue(q, i);
1759
1760 cpumask_set_cpu(i, hctx->cpumask);
1761 ctx->index_hw = hctx->nr_ctx;
1762 hctx->ctxs[hctx->nr_ctx++] = ctx;
1763 }
1764
1765 mutex_unlock(&q->sysfs_lock);
1766
1767 queue_for_each_hw_ctx(q, hctx, i) {
1768 /*
1769 * If no software queues are mapped to this hardware queue,
1770 * disable it and free the request entries.
1771 */
1772 if (!hctx->nr_ctx) {
1773 if (set->tags[i]) {
1774 blk_mq_free_rq_map(set, set->tags[i], i);
1775 set->tags[i] = NULL;
1776 }
1777 hctx->tags = NULL;
1778 continue;
1779 }
1780
1781 /* unmapped hw queue can be remapped after CPU topo changed */
1782 if (!set->tags[i])
1783 set->tags[i] = blk_mq_init_rq_map(set, i);
1784 hctx->tags = set->tags[i];
1785 WARN_ON(!hctx->tags);
1786
1787 /*
1788 * Set the map size to the number of mapped software queues.
1789 * This is more accurate and more efficient than looping
1790 * over all possibly mapped software queues.
1791 */
1792 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
1793
1794 /*
1795 * Initialize batch roundrobin counts
1796 */
1797 hctx->next_cpu = cpumask_first(hctx->cpumask);
1798 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1799 }
1800}
1801
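/*
 * Propagate the shared-tags state to every hardware context of a
 * queue; called with the queue frozen or during queue setup.
 */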
1802static void queue_set_hctx_shared(struct request_queue *q, bool shared)
1803{
1804 struct blk_mq_hw_ctx *hctx;
1805 int i;
1806
1807 queue_for_each_hw_ctx(q, hctx, i) {
1808 if (shared)
1809 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1810 else
1811 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1812 }
1813}
1814
1815static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
1816{
1817 struct request_queue *q;
1818
1819 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1820 blk_mq_freeze_queue(q);
1821 queue_set_hctx_shared(q, shared);
1822 blk_mq_unfreeze_queue(q);
1823 }
1824}
1825
1826static void blk_mq_del_queue_tag_set(struct request_queue *q)
1827{
1828 struct blk_mq_tag_set *set = q->tag_set;
1829
1830 mutex_lock(&set->tag_list_lock);
1831 list_del_init(&q->tag_set_list);
1832 if (list_is_singular(&set->tag_list)) {
1833 /* just transitioned to unshared */
1834 set->flags &= ~BLK_MQ_F_TAG_SHARED;
1835 /* update existing queue */
1836 blk_mq_update_tag_set_depth(set, false);
1837 }
1838 mutex_unlock(&set->tag_list_lock);
1839}
1840
1841static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1842 struct request_queue *q)
1843{
1844 q->tag_set = set;
1845
1846 mutex_lock(&set->tag_list_lock);
1847
1848 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
1849 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
1850 set->flags |= BLK_MQ_F_TAG_SHARED;
1851 /* update existing queue */
1852 blk_mq_update_tag_set_depth(set, true);
1853 }
1854 if (set->flags & BLK_MQ_F_TAG_SHARED)
1855 queue_set_hctx_shared(q, true);
1856 list_add_tail(&q->tag_set_list, &set->tag_list);
1857
1858 mutex_unlock(&set->tag_list_lock);
1859}
1860
1861/*
1862 * This is the actual release handler for mq, but we do it from the
1863 * request queue's release handler to avoid use-after-free issues and
1864 * other headaches: q->mq_kobj shouldn't have been introduced,
1865 * but we can't group the ctx/kctx kobjects without it.
1866 */
1867void blk_mq_release(struct request_queue *q)
1868{
1869 struct blk_mq_hw_ctx *hctx;
1870 unsigned int i;
1871
1872 /* hctx kobj stays in hctx */
1873 queue_for_each_hw_ctx(q, hctx, i) {
1874 if (!hctx)
1875 continue;
1876 kfree(hctx->ctxs);
1877 kfree(hctx);
1878 }
1879
1880 q->mq_map = NULL;
1881
1882 kfree(q->queue_hw_ctx);
1883
1884 /* ctx kobj stays in queue_ctx */
1885 free_percpu(q->queue_ctx);
1886}
1887
1888struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1889{
1890 struct request_queue *uninit_q, *q;
1891
1892 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1893 if (!uninit_q)
1894 return ERR_PTR(-ENOMEM);
1895
1896 q = blk_mq_init_allocated_queue(set, uninit_q);
1897 if (IS_ERR(q))
1898 blk_cleanup_queue(uninit_q);
1899
1900 return q;
1901}
1902EXPORT_SYMBOL(blk_mq_init_queue);
1903
1904static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
1905 struct request_queue *q)
1906{
1907 int i, j;
1908 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
1909
1910 blk_mq_sysfs_unregister(q);
1911
1912 /* protect against switching io scheduler */
1913 mutex_lock(&q->sysfs_lock);
1914 for (i = 0; i < set->nr_hw_queues; i++) {
1915 int node;
1916
1917 if (hctxs[i])
1918 continue;
1919
1920 node = blk_mq_hw_queue_to_node(q->mq_map, i);
1921 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1922 GFP_KERNEL, node);
1923 if (!hctxs[i])
1924 break;
1925
1926 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1927 node)) {
1928 kfree(hctxs[i]);
1929 hctxs[i] = NULL;
1930 break;
1931 }
1932
1933 atomic_set(&hctxs[i]->nr_active, 0);
1934 hctxs[i]->numa_node = node;
1935 hctxs[i]->queue_num = i;
1936
1937 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
1938 free_cpumask_var(hctxs[i]->cpumask);
1939 kfree(hctxs[i]);
1940 hctxs[i] = NULL;
1941 break;
1942 }
1943 blk_mq_hctx_kobj_init(hctxs[i]);
1944 }
1945 for (j = i; j < q->nr_hw_queues; j++) {
1946 struct blk_mq_hw_ctx *hctx = hctxs[j];
1947
1948 if (hctx) {
1949 if (hctx->tags) {
1950 blk_mq_free_rq_map(set, hctx->tags, j);
1951 set->tags[j] = NULL;
1952 }
1953 blk_mq_exit_hctx(q, set, hctx, j);
1954 free_cpumask_var(hctx->cpumask);
1955 kobject_put(&hctx->kobj);
1956 kfree(hctx->ctxs);
1957 kfree(hctx);
1958 hctxs[j] = NULL;
1959
1960 }
1961 }
1962 q->nr_hw_queues = i;
1963 mutex_unlock(&q->sysfs_lock);
1964 blk_mq_sysfs_register(q);
1965}
1966
1967struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1968 struct request_queue *q)
1969{
1970 /* mark the queue as mq asap */
1971 q->mq_ops = set->ops;
1972
1973 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
1974 if (!q->queue_ctx)
1975 goto err_exit;
1976
1977 /* init q->mq_kobj and sw queues' kobjects */
1978 blk_mq_sysfs_init(q);
1979
1980 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
1981 GFP_KERNEL, set->numa_node);
1982 if (!q->queue_hw_ctx)
1983 goto err_percpu;
1984
1985 q->mq_map = set->mq_map;
1986
1987 blk_mq_realloc_hw_ctxs(set, q);
1988 if (!q->nr_hw_queues)
1989 goto err_hctxs;
1990
1991 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
1992 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
1993
1994 q->nr_queues = nr_cpu_ids;
1995
1996 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1997
1998 if (!(set->flags & BLK_MQ_F_SG_MERGE))
1999 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2000
2001 q->sg_reserved_size = INT_MAX;
2002
2003 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2004 INIT_LIST_HEAD(&q->requeue_list);
2005 spin_lock_init(&q->requeue_lock);
2006
2007 if (q->nr_hw_queues > 1)
2008 blk_queue_make_request(q, blk_mq_make_request);
2009 else
2010 blk_queue_make_request(q, blk_sq_make_request);
2011
2012 /*
2013 * Do this after blk_queue_make_request() overrides it...
2014 */
2015 q->nr_requests = set->queue_depth;
2016
2017 if (set->ops->complete)
2018 blk_queue_softirq_done(q, set->ops->complete);
2019
2020 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2021
2022 get_online_cpus();
2023 mutex_lock(&all_q_mutex);
2024
2025 list_add_tail(&q->all_q_node, &all_q_list);
2026 blk_mq_add_queue_tag_set(set, q);
2027 blk_mq_map_swqueue(q, cpu_online_mask);
2028
2029 mutex_unlock(&all_q_mutex);
2030 put_online_cpus();
2031
2032 return q;
2033
2034err_hctxs:
2035 kfree(q->queue_hw_ctx);
2036err_percpu:
2037 free_percpu(q->queue_ctx);
2038err_exit:
2039 q->mq_ops = NULL;
2040 return ERR_PTR(-ENOMEM);
2041}
2042EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2043
2044void blk_mq_free_queue(struct request_queue *q)
2045{
2046 struct blk_mq_tag_set *set = q->tag_set;
2047
2048 mutex_lock(&all_q_mutex);
2049 list_del_init(&q->all_q_node);
2050 mutex_unlock(&all_q_mutex);
2051
2052 blk_mq_del_queue_tag_set(q);
2053
2054 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2055 blk_mq_free_hw_queues(q, set);
2056}
2057
2058/* Basically redo blk_mq_init_queue with the queue frozen */
2059static void blk_mq_queue_reinit(struct request_queue *q,
2060 const struct cpumask *online_mask)
2061{
2062 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2063
2064 blk_mq_sysfs_unregister(q);
2065
2066 /*
2067 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2068 * we should change hctx numa_node according to the new topology (this
2069 * involves freeing and re-allocating memory, worth doing?)
2070 */
2071
2072 blk_mq_map_swqueue(q, online_mask);
2073
2074 blk_mq_sysfs_register(q);
2075}
2076
2077/*
2078 * New online cpumask which is going to be set in this hotplug event.
2079 * Declare this cpumask as global, as cpu-hotplug operations are invoked
2080 * one by one and dynamically allocating it could fail at that point.
2081 */
2082static struct cpumask cpuhp_online_new;
2083
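/*
 * Freeze every known queue, remap all of them against cpuhp_online_new,
 * then unfreeze. Called from the CPU hotplug callbacks below.
 */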
2084static void blk_mq_queue_reinit_work(void)
2085{
2086 struct request_queue *q;
2087
2088 mutex_lock(&all_q_mutex);
2089 /*
2090 * We need to freeze and reinit all existing queues. Freezing
2091 * involves a synchronous wait for an RCU grace period, and doing it
2092 * one by one may take a long time. Start freezing all queues in
2093 * one swoop and then wait for the completions so that freezing can
2094 * take place in parallel.
2095 */
2096 list_for_each_entry(q, &all_q_list, all_q_node)
2097 blk_mq_freeze_queue_start(q);
2098 list_for_each_entry(q, &all_q_list, all_q_node) {
2099 blk_mq_freeze_queue_wait(q);
2100
2101 /*
2102 * The timeout handler must not touch the hw queue during
2103 * reinitialization.
2104 */
2105 del_timer_sync(&q->timeout);
2106 }
2107
2108 list_for_each_entry(q, &all_q_list, all_q_node)
2109 blk_mq_queue_reinit(q, &cpuhp_online_new);
2110
2111 list_for_each_entry(q, &all_q_list, all_q_node)
2112 blk_mq_unfreeze_queue(q);
2113
2114 mutex_unlock(&all_q_mutex);
2115}
2116
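/*
 * A CPU has gone offline: remap all queues against the remaining online
 * CPUs (the dead CPU is already gone from cpu_online_mask here).
 */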
2117static int blk_mq_queue_reinit_dead(unsigned int cpu)
2118{
2119 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2120 blk_mq_queue_reinit_work();
2121 return 0;
2122}
2123
2124/*
2125 * Before a hotadded cpu starts handling requests, new mappings must be
2126 * established. Otherwise, requests queued on that cpu might never be
2127 * dispatched.
2128 *
2129 * For example, suppose there is a single hw queue (hctx) and two CPU
2130 * queues (ctx0 for CPU0, and ctx1 for CPU1).
2131 *
2132 * Now CPU1 has just been onlined: a request is inserted into ctx1->rq_list
2133 * and bit0 is set in the pending bitmap, as ctx1->index_hw is still zero.
2134 *
2135 * Then, while running the hw queue, flush_busy_ctxs() finds bit0 set in
2136 * the pending bitmap and tries to retrieve requests from hctx->ctxs[0]->rq_list.
2137 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list
2138 * is ignored.
2139 */
2140static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2141{
2142 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2143 cpumask_set_cpu(cpu, &cpuhp_online_new);
2144 blk_mq_queue_reinit_work();
2145 return 0;
2146}
2147
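/*
 * Allocate one request map per hardware queue at the current
 * set->queue_depth, unwinding the earlier allocations on failure.
 */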
2148static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2149{
2150 int i;
2151
2152 for (i = 0; i < set->nr_hw_queues; i++) {
2153 set->tags[i] = blk_mq_init_rq_map(set, i);
2154 if (!set->tags[i])
2155 goto out_unwind;
2156 }
2157
2158 return 0;
2159
2160out_unwind:
2161 while (--i >= 0)
2162 blk_mq_free_rq_map(set, set->tags[i], i);
2163
2164 return -ENOMEM;
2165}
2166
2167/*
2168 * Allocate the request maps associated with this tag_set. Note that this
2169 * may reduce the depth asked for, if memory is tight. set->queue_depth
2170 * will be updated to reflect the allocated depth.
2171 */
2172static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2173{
2174 unsigned int depth;
2175 int err;
2176
2177 depth = set->queue_depth;
2178 do {
2179 err = __blk_mq_alloc_rq_maps(set);
2180 if (!err)
2181 break;
2182
2183 set->queue_depth >>= 1;
2184 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2185 err = -ENOMEM;
2186 break;
2187 }
2188 } while (set->queue_depth);
2189
2190 if (!set->queue_depth || err) {
2191 pr_err("blk-mq: failed to allocate request map\n");
2192 return -ENOMEM;
2193 }
2194
2195 if (depth != set->queue_depth)
2196 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2197 depth, set->queue_depth);
2198
2199 return 0;
2200}
2201
2202/*
2203 * Alloc a tag set to be associated with one or more request queues.
2204 * May fail with EINVAL for various error conditions. May adjust the
2205 * requested depth down, if it is too large. In that case, the adjusted
2206 * value will be stored in set->queue_depth.
2207 */
2208int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2209{
2210 int ret;
2211
2212 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2213
2214 if (!set->nr_hw_queues)
2215 return -EINVAL;
2216 if (!set->queue_depth)
2217 return -EINVAL;
2218 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2219 return -EINVAL;
2220
2221 if (!set->ops->queue_rq)
2222 return -EINVAL;
2223
2224 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2225 pr_info("blk-mq: reduced tag depth to %u\n",
2226 BLK_MQ_MAX_DEPTH);
2227 set->queue_depth = BLK_MQ_MAX_DEPTH;
2228 }
2229
2230 /*
2231 * If a crashdump is active, then we are potentially in a very
2232 * memory constrained environment. Limit us to 1 queue and
2233 * 64 tags to prevent using too much memory.
2234 */
2235 if (is_kdump_kernel()) {
2236 set->nr_hw_queues = 1;
2237 set->queue_depth = min(64U, set->queue_depth);
2238 }
2239 /*
2240 * There is no use for more h/w queues than cpus.
2241 */
2242 if (set->nr_hw_queues > nr_cpu_ids)
2243 set->nr_hw_queues = nr_cpu_ids;
2244
2245 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2246 GFP_KERNEL, set->numa_node);
2247 if (!set->tags)
2248 return -ENOMEM;
2249
2250 ret = -ENOMEM;
2251 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2252 GFP_KERNEL, set->numa_node);
2253 if (!set->mq_map)
2254 goto out_free_tags;
2255
2256 if (set->ops->map_queues)
2257 ret = set->ops->map_queues(set);
2258 else
2259 ret = blk_mq_map_queues(set);
2260 if (ret)
2261 goto out_free_mq_map;
2262
2263 ret = blk_mq_alloc_rq_maps(set);
2264 if (ret)
2265 goto out_free_mq_map;
2266
2267 mutex_init(&set->tag_list_lock);
2268 INIT_LIST_HEAD(&set->tag_list);
2269
2270 return 0;
2271
2272out_free_mq_map:
2273 kfree(set->mq_map);
2274 set->mq_map = NULL;
2275out_free_tags:
2276 kfree(set->tags);
2277 set->tags = NULL;
2278 return ret;
2279}
2280EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2281
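/*
 * Release everything blk_mq_alloc_tag_set() created: the per-hw-queue
 * request maps, the cpu-to-queue map and the tags array itself.
 */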
2282void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2283{
2284 int i;
2285
2286 for (i = 0; i < nr_cpu_ids; i++) {
2287 if (set->tags[i])
2288 blk_mq_free_rq_map(set, set->tags[i], i);
2289 }
2290
2291 kfree(set->mq_map);
2292 set->mq_map = NULL;
2293
2294 kfree(set->tags);
2295 set->tags = NULL;
2296}
2297EXPORT_SYMBOL(blk_mq_free_tag_set);
2298
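/*
 * Adjust the depth (nr_requests) of a live queue. The new value may not
 * exceed the depth the tag set was originally allocated with.
 */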
2299int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2300{
2301 struct blk_mq_tag_set *set = q->tag_set;
2302 struct blk_mq_hw_ctx *hctx;
2303 int i, ret;
2304
2305 if (!set || nr > set->queue_depth)
2306 return -EINVAL;
2307
2308 ret = 0;
2309 queue_for_each_hw_ctx(q, hctx, i) {
2310 if (!hctx->tags)
2311 continue;
2312 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2313 if (ret)
2314 break;
2315 }
2316
2317 if (!ret)
2318 q->nr_requests = nr;
2319
2320 return ret;
2321}
2322
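/*
 * Change the number of hardware queues for every queue sharing this tag
 * set. All queues stay frozen across the update; each one gets its hw
 * contexts reallocated, its make_request function re-selected and its
 * software queues remapped before being unfrozen again.
 */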
2323void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2324{
2325 struct request_queue *q;
2326
2327 if (nr_hw_queues > nr_cpu_ids)
2328 nr_hw_queues = nr_cpu_ids;
2329 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2330 return;
2331
2332 list_for_each_entry(q, &set->tag_list, tag_set_list)
2333 blk_mq_freeze_queue(q);
2334
2335 set->nr_hw_queues = nr_hw_queues;
2336 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2337 blk_mq_realloc_hw_ctxs(set, q);
2338
2339 if (q->nr_hw_queues > 1)
2340 blk_queue_make_request(q, blk_mq_make_request);
2341 else
2342 blk_queue_make_request(q, blk_sq_make_request);
2343
2344 blk_mq_queue_reinit(q, cpu_online_mask);
2345 }
2346
2347 list_for_each_entry(q, &set->tag_list, tag_set_list)
2348 blk_mq_unfreeze_queue(q);
2349}
2350EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2351
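/*
 * Hold off CPU-hotplug driven queue remapping by taking all_q_mutex,
 * which blk_mq_queue_reinit_work() also acquires.
 */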
2352void blk_mq_disable_hotplug(void)
2353{
2354 mutex_lock(&all_q_mutex);
2355}
2356
2357void blk_mq_enable_hotplug(void)
2358{
2359 mutex_unlock(&all_q_mutex);
2360}
2361
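/*
 * Boot-time setup: register the CPU hotplug callbacks that drain a dead
 * CPU's software queues and remap queues on hotplug events.
 */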
2362static int __init blk_mq_init(void)
2363{
2364 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2365 blk_mq_hctx_notify_dead);
2366
2367 cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2368 blk_mq_queue_reinit_prepare,
2369 blk_mq_queue_reinit_dead);
2370 return 0;
2371}
2372subsys_initcall(blk_mq_init);
2373