/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

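/*
 * Parse a sysfs store buffer as an unsigned decimal value.  Values that do
 * not fit in an unsigned int are rejected so callers can assign the result
 * to narrower queue fields; on success the full buffer length is returned,
 * as sysfs expects.
 */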
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

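/*
 * max_sectors_kb is supplied in kilobytes and must lie between one page and
 * the (device-capped) hardware limit; the accepted value is stored back in
 * 512-byte sectors under the queue lock.
 */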
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

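/*
 * Expand to a show/store pair for a single boolean queue flag.  "neg"
 * inverts the exported sense, e.g. QUEUE_FLAG_NONROT is exposed as
 * "rotational" further down.
 */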
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

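/*
 * nomerges encodes two flags in one value: 0 allows all merging, 1 sets
 * only QUEUE_FLAG_NOXMERGES (skip the more expensive merge lookups), and
 * 2 sets QUEUE_FLAG_NOMERGES (no merging at all).
 */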
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

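/*
 * rq_affinity: 0 disables completion affinity, 1 completes requests on a
 * CPU in the submitter's group (QUEUE_FLAG_SAME_COMP), and 2 forces
 * completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).
 */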
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

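/*
 * Writing "write back" sets the volatile write cache flag, while
 * "write through" or "none" clears it; anything else is rejected.
 */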
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

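/*
 * Both the show and store paths serialize on q->sysfs_lock and return
 * -ENOENT once the queue has been marked dying, so attribute access does
 * not race with queue teardown.
 */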
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	bdi_exit(&q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

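/*
 * Register the request_queue with sysfs when its gendisk is added: create
 * the "queue" directory under the disk's device kobject and hook up the
 * blk-mq and elevator entries that live beneath it.
 */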
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}