#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
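
/*
 * Usage sketch (illustrative, not part of the original header): a software
 * queue is translated to its hardware queue by mapping the ctx's CPU, e.g.
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 */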

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
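
/*
 * Usage sketch (illustrative, not part of the original header):
 * blk_mq_get_ctx() pins the caller to a CPU via get_cpu(), so it is paired
 * with blk_mq_put_ctx() around a short, non-sleeping section, e.g.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... work on the per-cpu software queue ...
 *	blk_mq_put_ctx(ctx);
 */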

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
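
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * typically stack-allocate the data and initialize it in one call; ctx and
 * hctx are "input & output" because the allocation path may update them:
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 */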

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif