/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

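/*
 * Returns true if the map still has unallocated tags. A NULL @tags means
 * no tag tracking is done at all, which counts as "free".
 */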
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of one can't be split any
	 * further.
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags: give each active queue roughly
	 * ceil(depth / users), but never fewer than four. E.g. a depth of
	 * 128 shared by four active queues allows 32 tags per queue.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

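/*
 * One non-blocking allocation attempt: returns a free bit from @bt, or
 * -1 if the hctx is over its fair share or the map is exhausted.
 */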
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
	if (!hctx_may_queue(hctx, bt))
		return -1;
	return __sbitmap_queue_get(bt);
}

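/*
 * Blocking allocation: loop until a tag frees up, kicking the hardware
 * queue between attempts so outstanding requests can complete. The
 * caller's ctx is dropped and reacquired around io_schedule(), and the
 * hctx/bitmap pair is remapped afterwards since we may wake on another
 * CPU.
 */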
static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	ws = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&ws->wait, &wait);
	return tag;
}

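/*
 * Allocate a normal (non-reserved) tag. The returned value is offset by
 * nr_reserved_tags so reserved and normal tags share one numbering
 * space.
 */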
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

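/*
 * Allocate from the reserved pool. hctx is passed to bt_get() as NULL,
 * so the fair-share accounting does not apply to reserved tags.
 */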
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

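/*
 * Entry point for tag allocation: dispatches to the reserved pool when
 * BLK_MQ_REQ_RESERVED is set, otherwise to the normal one.
 */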
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}

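/*
 * Return a tag to the pool it came from, waking up any waiters. Normal
 * tags carry the nr_reserved_tags offset, which is undone here before
 * the bit is cleared.
 */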
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

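/*
 * Per-bit callback state for bt_for_each(). bt_iter() looks up the
 * request behind each set bit and, when it belongs to the hctx's queue,
 * hands it to the caller's busy_iter_fn.
 */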
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	if (rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

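/*
 * Walk all allocated tags in @bt and invoke @fn on each request that
 * belongs to @hctx's queue.
 */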
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

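/*
 * As above, but for a bare tag map with no hardware queue context, so
 * no per-queue filtering is applied.
 */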
struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	iter_data->fn(rq, iter_data->data, reserved);
	return true;
}

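/*
 * Walk all allocated tags in @bt; the tags->rqs check guards against
 * iterating a map whose request array has not been set up yet.
 */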
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

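/*
 * Iterate every allocated tag in one tag map, reserved tags first.
 */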
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

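/*
 * Call @fn on every request that holds a tag in any hardware queue of
 * @tagset. Exported so drivers can walk their outstanding requests.
 */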
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

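/*
 * Invoke the driver's ->reinit_request() on every allocated request in
 * the set, e.g. to reinitialize state after a controller reset. Stops
 * on the first error.
 */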
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

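/*
 * Iterate over every request that holds a tag on @q, calling @fn with
 * the owning hardware queue context.
 */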
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

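/* Number of clear bits, i.e. tags not currently allocated. */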
static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
	return bt->sb.depth - sbitmap_weight(&bt->sb);
}

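/*
 * Thin wrapper around sbitmap_queue_init_node(); the -1 shift lets
 * sbitmap choose its own bits_per_word granularity.
 */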
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

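/*
 * Set up both bitmaps. On failure everything allocated so far,
 * including the tags structure itself, is freed and NULL is returned.
 */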
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

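/*
 * Allocate a tag map with @total_tags tags, of which @reserved_tags are
 * set aside in a separate reserved pool.
 */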
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

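/*
 * Resize the normal tag bitmap. @tdepth counts reserved tags too, and
 * may not exceed the depth the map was originally created with.
 */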
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * No need to (and we can't) update the reserved tags here; they
	 * remain static and should never need resizing.
	 */
	sbitmap_queue_resize(&tags->bitmap_tags, tdepth);

	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits. With the 16-bit split defined by BLK_MQ_UNIQUE_TAG_BITS,
 * hardware queue 2 and per-queue tag 5 yield the unique tag 0x00020005; the
 * two halves can be recovered with blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag().
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

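/*
 * Format tag map statistics for sysfs: depths, bits per sbitmap word,
 * free counts, and the number of active queues sharing the map.
 */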
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			1U << tags->bitmap_tags.sb.shift);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}