/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for instance,
 * to build a CDB from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);
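
/*
 * Example (illustrative sketch, not part of the original file): a
 * SCSI-like driver could pair the two hooks, building a command in its
 * prepare callback and releasing it on completion.  The my_* names are
 * hypothetical.
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		rq->special = my_build_cmd(rq);
 *		return rq->special ? BLKPREP_OK : BLKPREP_DEFER;
 *	}
 *
 *	static void my_unprep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		my_free_cmd(rq->special);
 *		rq->special = NULL;
 *	}
 *
 *	blk_queue_prep_rq(q, my_prep_rq);
 *	blk_queue_unprep_rq(q, my_unprep_rq);
 */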

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 * Resets a queue_limits structure to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Description:
 * Resets a queue_limits structure to its default state. Should be used
 * by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->discard_zeroes_data = 1;
	lim->max_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
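
/*
 * Example (sketch): a stacking driver typically starts from these
 * permissive defaults and then narrows them by stacking in each
 * component device; the dev/ti names below are hypothetical.
 *
 *	struct queue_limits limits;
 *
 *	blk_set_stacking_limits(&limits);
 *	list_for_each_entry(dev, &ti->devs, list)
 *		bdev_stack_limits(&limits, dev->bdev, 0);
 */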

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 * The normal way for &struct bios to be passed to a device
 * driver is for them to be collected into requests on a request
 * queue, and then to allow the device driver to select requests
 * off that queue when it is ready. This works well for many block
 * devices. However some block devices (typically virtual devices
 * such as md or lvm) do not benefit from the processing on the
 * request queue, and are served best by having the requests passed
 * directly to them. This can be achieved by providing a function
 * to blk_queue_make_request().
 *
 * Caveat:
 * The driver that does this *must* be able to deal appropriately
 * with buffers in "highmemory". This can be accomplished by either calling
 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 * blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	blk_set_default_limits(&q->limits);

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
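
/*
 * Example (sketch): a bio-based virtual driver bypasses the request
 * queue by installing its own make_request function.  Names are
 * hypothetical; the callback signature matches make_request_fn.
 *
 *	static blk_qc_t my_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		my_handle_bio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, my_make_request);
 */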

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 * Different hardware can have different requirements as to what pages
 * it can do I/O directly to. A low level driver can call
 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
 * buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU. Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
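
/*
 * Example (sketch): a driver whose controller can reach any page
 * passes BLK_BOUNCE_ANY, while one limited to 32-bit DMA asks for
 * bouncing of pages above 4GB:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */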

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 * Enables a low level driver to set a hard upper limit,
 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
 * the device driver based upon the capabilities of the I/O
 * controller.
 *
 * max_dev_sectors is a hard limit imposed by the storage device for
 * READ/WRITE requests. It is set by the disk driver.
 *
 * max_sectors is a soft limit imposed by the block layer for
 * filesystem type requests. This value can be overridden on a
 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 * The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
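
/*
 * Example (sketch): a controller that can transfer at most 1 MB per
 * request would report 2048 sectors (1 MB / 512 bytes):
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */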

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 * If a driver doesn't want IOs to cross a given chunk size, it can set
 * this limit and prevent merging across chunks. Note that the chunk size
 * must currently be a power-of-2 in sectors. Also note that the block
 * layer must accept a page worth of data at any offset. So if the
 * crossing of chunks is a hard limitation in the driver, it must still be
 * prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
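
/*
 * Example (sketch): a device with an internal 128 KB boundary that
 * requests must not cross would use 256 sectors, so merging stops at
 * every 128 KB:
 *
 *	blk_queue_chunk_sectors(q, 256);
 */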

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q: the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
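
/*
 * Example (sketch): an SSD that accepts discards of up to 4 GB
 * (8388608 sectors) and has no WRITE SAME support might report:
 *
 *	blk_queue_max_discard_sectors(q, 8388608);
 *	blk_queue_max_write_same_sectors(q, 0);
 */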

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 * Enables a low level driver to set an upper limit on the number of
 * hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 * Enables a low level driver to set an upper limit on the size of a
 * coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible block size that the
 * storage device can address. The default of 512 covers most
 * hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible sector size that the
 * hardware can operate on without reverting to read-modify-write
 * operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
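
/*
 * Example (sketch): a 512e drive addresses 512-byte logical blocks but
 * writes 4096-byte physical sectors internally, whereas a 4Kn drive
 * uses 4096 for both:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */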

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 * Some devices are naturally misaligned to compensate for things like
 * the legacy DOS partition table 63-sector offset. Low-level drivers
 * should call this function for devices whose first sector is not
 * naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Some devices have an internal block size bigger than the reported
 * hardware sector size. This function can be used to signal the
 * smallest I/O the device can perform without incurring a performance
 * penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Storage devices may report a granularity or preferred minimum I/O
 * size which is the smallest request the device can perform without
 * incurring a performance penalty. For disk drives this is often the
 * physical block size. For RAID arrays it is often the stripe chunk
 * size. A properly aligned multiple of minimum_io_size is the
 * preferred request size for workloads where a high number of I/O
 * operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
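
/*
 * Example (sketch): a RAID5 array with a 64 KB chunk and four data
 * disks would advertise the chunk as the minimum and the full 256 KB
 * stripe as the optimal I/O size:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 256 * 1024);
 */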

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 *
 * Returns 0 if the top and bottom queue_limits are compatible. The
 * top device's block sizes and alignment offsets may be adjusted to
 * ensure alignment with the bottom device. If no compatible sizes
 * and alignments exist, -1 is returned and the resulting top
 * queue_limits will have the misaligned flag set to indicate that
 * the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t: the stacking driver limits (top device)
 * @bdev: the component block_device (bottom)
 * @start: first data sector within component device
 *
 * Description:
 * Merges queue limits for a top device and a block_device. Returns
 * 0 if alignment didn't change. Returns -1 if adding the bottom
 * device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 * Merges the limits for a top level gendisk and a bottom level
 * block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
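
/*
 * Example (sketch): an MD/DM-style driver stacks each component device
 * below its own gendisk, passing the byte offset at which the mapped
 * data starts within that component (names hypothetical):
 *
 *	disk_stack_limits(my_disk, component_bdev, data_offset_bytes);
 */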

/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer. They have to have a
 * real area of memory to transfer it into. The use case for this is
 * ATAPI devices in DMA mode. If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer. What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer. If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
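
/*
 * Example (sketch): the classic user of this interface is the libata
 * ATAPI path.  A simplified setup might look like this; the buffer
 * size, my_drain_needed() and the error handling are illustrative:
 *
 *	buf = kmalloc(1024, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, my_drain_needed, buf, 1024);
 *
 * where my_drain_needed() returns non-zero for commands whose transfer
 * length the hardware cannot predict in advance.
 */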

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 * set required memory and length alignment for direct dma transactions.
 * this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 * update required memory and length alignment for direct dma transactions.
 * If the requested alignment is larger than the current alignment, then
 * the current queue alignment is updated to the new value, otherwise it
 * is left alone. The design of this is to allow multiple objects
 * (driver, device, transport etc) to set their respective
 * alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
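
/*
 * Example (sketch): a host that needs buffers for direct I/O to be
 * 512-byte aligned would pass a mask of 511; the update variant only
 * ever tightens the existing requirement:
 *
 *	blk_queue_update_dma_alignment(q, 512 - 1);
 */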

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
	spin_lock_irq(q->queue_lock);
	if (queueable)
		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q: the request queue for the device
 * @wc: write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	spin_lock_irq(q->queue_lock);
	if (wc)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		queue_flag_clear(QUEUE_FLAG_FUA, q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
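
/*
 * Example (sketch): a driver for a device with a volatile write-back
 * cache that also honours FUA writes would announce both, so the block
 * layer knows to issue flushes and FUA requests when needed:
 *
 *	blk_queue_write_cache(q, true, true);
 */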

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);