path: root/block/blk-timeout.c
blob: 220661a50f58557df475a550058d68ee48bf1497
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */
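
/*
 * Usage sketch (illustrative, not part of this file): the fault_attr
 * format parsed by setup_fault_attr() is the standard fault-injection
 * tuple "<interval>,<probability>,<space>,<times>", so timeouts can be
 * faked by booting with e.g. "fail_io_timeout=1,100,0,-1" or by tuning
 * the knobs under /sys/kernel/debug/fail_io_timeout/ at runtime.
 * QUEUE_FLAG_FAIL_IO must additionally be enabled per disk through the
 * sysfs attribute backed by part_timeout_show()/part_timeout_store()
 * above (wired up from genhd.c; the exact attribute name may vary
 * between kernel versions).
 */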

/*
 * blk_delete_timer - Delete/cancel the timer for a given request.
 * @req: request that we are canceling the timer for
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	if (q->rq_timed_out_fn)
		ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		/* Can we use req->errors here? */
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send a request message to abort the command,
		 * and move more of the generic SCSI EH code into the
		 * block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
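
/*
 * A minimal sketch (hypothetical, not part of this file) of the driver
 * side of the protocol handled above: an LLD's rq_timed_out_fn, installed
 * with blk_queue_rq_timed_out(), picks one of the three return codes.
 * The mydrv_* names and the per-request data layout are assumptions.
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = rq->special;	// hypothetical per-rq data
 *
 *		if (mydrv_cmd_completed(cmd))
 *			return BLK_EH_HANDLED;		// done, complete the request
 *		if (mydrv_cmd_should_retry(cmd))
 *			return BLK_EH_RESET_TIMER;	// re-arm and keep waiting
 *		return BLK_EH_NOT_HANDLED;		// driver runs its own recovery
 *	}
 */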

static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
				 unsigned int *next_set)
{
	if (time_after_eq(jiffies, rq->deadline)) {
		list_del_init(&rq->timeout_list);

		/*
		 * Check if we raced with end io completion
		 */
		if (!blk_mark_rq_complete(rq))
			blk_rq_timed_out(rq);
	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
		*next_timeout = rq->deadline;
		*next_set = 1;
	}
}

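/*
 * Work handler for q->timeout_work; the q->timeout timer's handler
 * (blk_rq_timed_out_timer() in blk-core.c) kicks this off kblockd.
 * It walks the pending requests and re-arms the timer for the earliest
 * deadline that has not yet expired.
 */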
void blk_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
		blk_rq_check_expired(rq, &next, &next_set);

	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request -- Request recovery for the specified request
 * @req: pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the queue's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;

	if (req->q->mq_ops) {
		blk_mq_rq_timed_out(req, false);
	} else {
		blk_delete_timer(req);
		blk_rq_timed_out(req);
	}
}
EXPORT_SYMBOL_GPL(blk_abort_request);
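
/*
 * Caller sketch (hypothetical, not part of this file): a transport
 * driver that learns out-of-band that a command is dead can force
 * recovery immediately instead of waiting for the deadline.  Per the
 * note above, the queue lock is held:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_abort_request(rq);
 *	spin_unlock_irq(q->queue_lock);
 */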
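/*
 * blk_rq_timeout - clamp an absolute expiry (in jiffies) to at most
 * BLK_MAX_TIMEOUT from now, so the queue timer always fires within a
 * bounded window; blk_timeout_work() then re-arms it as needed for
 * requests whose real deadline lies further out.
 */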
unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req: request that is about to start running.
 *
 * Notes:
 * Each request has its own timer, and as it is added to the queue, we
 * set up the timer. When the request completes, we cancel the timer.
 * Queue lock must be held for the non-mq case; the mq case doesn't care.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
	if (!q->mq_ops && !q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->deadline = jiffies + req->timeout;

	/*
	 * Only the non-mq case needs to add the request to a protected list.
	 * For the mq case we simply scan the tag map.
	 */
	if (!q->mq_ops)
		list_add_tail(&req->timeout_list, &req->q->timeout_list);

	/*
	 * If the timer isn't already pending, or if this timeout is earlier
	 * than an existing one, modify the timer. Round up to the nearest
	 * second.
	 */
	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to the timer slack added to group timers, the timer
		 * will often fire a little ahead of what we asked for. So
		 * apply some tolerance here too, otherwise we would keep
		 * modifying the timer because the expiry for value X will
		 * be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}
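
/*
 * Worked example (assuming HZ=1000 and the BLK_MAX_TIMEOUT of 5 * HZ
 * defined in blk.h): a request with a 30s timeout gets deadline =
 * jiffies + 30000, but the queue timer is first armed only ~5s out
 * because of the clamp in blk_rq_timeout().  When it fires,
 * blk_timeout_work() finds the request unexpired and re-arms the timer
 * for the rounded deadline, so the request still times out close to
 * the full 30 seconds.
 */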