/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
        if (!rq->bio) {
                blk_rq_bio_prep(rq->q, rq, bio);
        } else {
                if (!ll_back_merge_fn(rq->q, rq, bio))
                        return -EINVAL;

                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += bio->bi_iter.bi_size;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
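
/*
 * Illustrative sketch (not part of the original file): one way a driver
 * that builds its own bios might attach them to a passthrough request
 * with blk_rq_append_bio().  The helper name my_attach_page() is
 * hypothetical, command setup is omitted, and the block is compiled out
 * with #if 0 so it remains a sketch rather than real code.
 */
#if 0
static int my_attach_page(struct request *rq, struct page *page,
                          unsigned int len, unsigned int offset)
{
        struct bio *bio;
        int ret;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;

        if (bio_add_pc_page(rq->q, bio, page, len, offset) < len) {
                bio_put(bio);
                return -EINVAL;
        }

        /* Merge the bio into the request, honouring the queue limits. */
        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                bio_put(bio);
        return ret;
}
#endif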

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio_set_flag(bio, BIO_NULL_MAPPED);

        iov_iter_advance(iter, bio->bi_iter.bi_size);
        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(rq, bio);
        if (ret) {
                bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
                bio_put(bio);
                return ret;
        }

        return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
108 | * original bio must be passed back in to blk_rq_unmap_user() for proper |
109 | * unmapping. |
110 | */ |
111 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, |
112 | struct rq_map_data *map_data, |
113 | const struct iov_iter *iter, gfp_t gfp_mask) |
114 | { |
115 | bool copy = false; |
116 | unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); |
117 | struct bio *bio = NULL; |
118 | struct iov_iter i; |
119 | int ret = -EINVAL; |
120 | |
121 | if (!iter_is_iovec(iter)) |
122 | goto fail; |
123 | |
124 | if (map_data) |
125 | copy = true; |
126 | else if (iov_iter_alignment(iter) & align) |
127 | copy = true; |
128 | else if (queue_virt_boundary(q)) |
129 | copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); |
130 | |
131 | i = *iter; |
132 | do { |
133 | ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy); |
134 | if (ret) |
135 | goto unmap_rq; |
136 | if (!bio) |
137 | bio = rq->bio; |
138 | } while (iov_iter_count(&i)); |
139 | |
140 | if (!bio_flagged(bio, BIO_USER_MAPPED)) |
141 | rq->cmd_flags |= REQ_COPY_USER; |
142 | return 0; |
143 | |
144 | unmap_rq: |
145 | __blk_rq_unmap_user(bio); |
146 | fail: |
147 | rq->bio = NULL; |
148 | return ret; |
149 | } |
150 | EXPORT_SYMBOL(blk_rq_map_user_iov); |
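
/*
 * Illustrative sketch (not part of the original file): building an
 * iov_iter from a user-space iovec array and handing it to
 * blk_rq_map_user_iov().  my_map_user_iovec() and its arguments are
 * hypothetical; the block is compiled out with #if 0.
 */
#if 0
static int my_map_user_iovec(struct request_queue *q, struct request *rq,
                             const struct iovec __user *uvec, int nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t bytes;
        int ret;

        bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs,
                             ARRAY_SIZE(iovstack), &iov, &iter);
        if (bytes < 0)
                return bytes;

        /* Map (or bounce-copy) the user pages into rq->bio. */
        ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);

        kfree(iov);             /* NULL if the on-stack iovec was used */
        return ret;
}
#endif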

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
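
/*
 * Illustrative sketch (not part of the original file): the map / execute /
 * unmap cycle described in the comment above.  Note how rq->bio is saved
 * before execution and handed back to blk_rq_unmap_user(), since I/O
 * completion may have changed rq->bio.  my_do_user_io() is hypothetical,
 * command setup (cmd_type, CDB, timeout, ...) is omitted, and the block
 * is compiled out with #if 0.
 */
#if 0
static int my_do_user_io(struct request_queue *q, struct gendisk *disk,
                         void __user *ubuf, unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out_put;

        bio = rq->bio;                  /* keep the original bio */
        blk_execute_rq(q, disk, rq, 0);

        ret = blk_rq_unmap_user(bio);
out_put:
        blk_put_request(rq);
        return ret;
}
#endif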

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
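
/*
 * Illustrative sketch (not part of the original file): mapping a kernel
 * buffer for a passthrough command with blk_rq_map_kern().  Unlike the
 * user-mapping path there is nothing to unmap afterwards.
 * my_do_kernel_io() is hypothetical, command setup is omitted, and the
 * block is compiled out with #if 0.
 */
#if 0
static int my_do_kernel_io(struct request_queue *q, struct gendisk *disk,
                           void *buf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Command setup (cmd_type, CDB, timeout, ...) is omitted here. */
        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (!ret)
                blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return ret;
}
#endif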