/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

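/*
 * Split off the front of a discard bio so that the result obeys the
 * queue's max_discard_sectors and discard alignment.  Returns the
 * split-off bio, or NULL if no split is performed.
 */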
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

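/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit.  Returns the split-off front portion, or NULL if no split is
 * performed.
 */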
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

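/*
 * Return the maximum number of sectors that may be taken from @bio at its
 * current starting sector, rounded down so that a split stays aligned to
 * the logical block size.
 */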
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

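/*
 * Walk @bio and split it at the first point where it would exceed the
 * queue's segment count, segment size or I/O size limits.  Returns the
 * split-off front bio, or NULL if the whole bio fits; *@segs is set to
 * the number of physical segments counted up to that point.
 */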
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	unsigned bvecs = 0;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * With arbitrary bio size, the incoming bio may be very
		 * big.  We have to split the bio into small bios so that
		 * each holds at most BIO_MAX_PAGES bvecs, because
		 * bio_clone() can fail to allocate big bvecs.
		 *
		 * It would be better to apply this limit per request queue
		 * in which bio_clone() is involved, instead of globally.
		 * The biggest blocker is the bio_clone() in bio bounce.
		 *
		 * If the bio is split for this reason, we should have been
		 * allowed to continue merging bios, but we don't do that
		 * for now to keep the change simple.
		 *
		 * TODO: deal with bio bounce's bio_clone() gracefully
		 * and convert the global limit into a per-queue limit.
		 */
		if (bvecs++ >= BIO_MAX_PAGES)
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the first segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

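/**
 * blk_queue_split - split a bio that exceeds the queue limits
 * @q:   request queue the bio is being submitted to
 * @bio: bio to check; updated to point at the portion that obeys the limits
 * @bs:  bio_set used for the discard and write-same cases
 *
 * If *@bio is larger than the queue allows, split off a front portion that
 * fits, chain the remainder to it, resubmit the remainder with
 * generic_make_request(), and update *@bio to the split-off part.
 */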
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

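/*
 * Count the physical segments in a bio chain, honoring the queue's
 * clustering and segment size limits, and record the front/back segment
 * sizes used later for merge decisions.
 */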
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
		return 1;

	if (bio_op(bio) == REQ_OP_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

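/*
 * Recount the physical segments of a single bio and mark it BIO_SEG_VALID.
 * With QUEUE_FLAG_NO_SG_MERGE set and a bvec count below the queue's
 * segment limit, the bvec count is used directly instead of walking
 * the bio.
 */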
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

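/*
 * Return 1 if the last segment of @bio and the first segment of @nxt are
 * physically contiguous and may be merged into a single segment without
 * violating the queue's segment size and boundary limits, 0 otherwise.
 */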
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

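/*
 * Add one bio_vec to the scatterlist being built: either extend the
 * current sg entry when clustering allows it, or start a new entry and
 * bump *nsegs.
 */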
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping.  We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

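/*
 * Map a chain of bios to a scatterlist.  Discard and write-same bios are
 * special-cased because they carry at most a single payload page; all
 * other bios are walked bvec by bvec.  Returns the number of sg entries
 * used.
 */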
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */
		if (!bio->bi_vcnt)
			return 0;
		/* Fall through */
	case REQ_OP_WRITE_SAME:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	default:
		break;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up.  Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of the request's physical
	 * segments
	 */
	WARN_ON(nsegs > rq->nr_phys_segments);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

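/*
 * Account for the segments @bio adds when merged into @req.  Returns 1 if
 * the merge stays within the queue's segment and integrity limits,
 * otherwise marks the request non-mergeable and returns 0.
 */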
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * segment count.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

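/*
 * ll_back_merge_fn() and ll_front_merge_fn() decide whether @bio may be
 * appended to, respectively prepended to, @req without exceeding the
 * request size, gap and segment limits.
 */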
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

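/*
 * Check whether two adjacent requests can be combined without exceeding
 * the queue's size, segment and integrity limits, and update the merged
 * request's physical segment count if they can.  Returns 1 on success,
 * 0 if the requests must not be merged.
 */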
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * They can't be merged if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

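/*
 * @req is going away because it was merged into another request; update
 * the per-partition in-flight accounting accordingly.
 */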
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (req_op(req) != req_op(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to req and release next.  merge_requests_fn will have updated
	 * the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge.  We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

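/*
 * attempt_back_merge() and attempt_front_merge() try to merge @rq with the
 * request the elevator reports as immediately following, respectively
 * preceding, it.  Both return 1 if a merge was performed and 0 otherwise.
 */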
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

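/*
 * Merge two requests on behalf of the I/O scheduler, giving the elevator's
 * allow_rq_merge hook a chance to veto the merge first.
 */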
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_rq_merge_fn)
		if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	return attempt_merge(q, rq, next);
}

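/*
 * Check whether @bio is a candidate for merging into @rq at all: same
 * operation, same data direction, same device, compatible integrity
 * profile, and a matching buffer for WRITE SAME.
 */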
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

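/*
 * Decide where @bio could be merged relative to @rq: at the back if it
 * starts right after the request ends, at the front if it ends right where
 * the request starts, otherwise nowhere.
 */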
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}