/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
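
/*
 * Illustrative only: a minimal sketch of how a filesystem might drive
 * read_cache_pages().  The filesystem name and fill helper below are
 * hypothetical; real users supply their own filler that starts or performs
 * the read for one page.
 *
 *	static int examplefs_fill_page(void *data, struct page *page)
 *	{
 *		struct file *file = data;
 *
 *		// hypothetical helper: read one page, unlock it when done
 *		return examplefs_readpage(file, page);
 *	}
 *
 *	// @pages is a list of allocated pages with ->index already set
 *	err = read_cache_pages(mapping, &pages, examplefs_fill_page, file);
 *
 * read_cache_pages() adds each page to the page cache and LRU, invokes the
 * filler on it, and on a filler error invalidates and releases whatever is
 * left on the list before returning that error.
 */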

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
			mapping->a_ops->readpage(filp, page);
		put_page(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

#ifdef CONFIG_AMLOGIC_CMA
	if (filp && (filp->f_mode & (FMODE_WRITE | FMODE_WRITE_IOCTL)))
		gfp_mask |= __GFP_WRITE;
#endif /* CONFIG_AMLOGIC_CMA */

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page))
			continue;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret, gfp_mask);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	struct file_ra_state *ra = &filp->f_ra;
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min(nr_to_read, max_pages);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0)
			return err;

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}
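
/*
 * Worked example (illustrative, assuming 4KiB pages): each pass reads at
 * most 2MiB / 4KiB = 512 pages, so a forced readahead of 1280 pages is
 * submitted as chunks of 512, 512 and 256 pages, and no single
 * __do_page_cache_readahead() call pins more than 2MiB of pages.  Note that
 * nr_to_read is first clamped to max(bdi->io_pages, ra->ra_pages), so very
 * large requests are trimmed before the chunking even starts.
 */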

/*
 * Set the initial window size: round the request up to the next power of
 * two, then quadruple it if that is at most 1/32 of the maximum window,
 * double it if it is at most 1/4 of the maximum, and otherwise cap it at
 * the maximum.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
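
/*
 * Worked example (illustrative), with the default 128KiB window on 4KiB
 * pages, i.e. max = 32 pages:
 *
 *	request  1 page  -> rounded to  1 <= 32/32 -> initial window  4 pages (16KiB)
 *	request  4 pages -> rounded to  4 <= 32/4  -> initial window  8 pages (32KiB)
 *	request 10 pages -> rounded to 16 >  32/4  -> initial window 32 pages (128KiB)
 */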

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
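
/*
 * Worked example (illustrative): with max = 32 pages a window ramps up as
 * 4 -> 8 -> 16 -> 32 and then stays capped at 32, because 4 is already at
 * least max/16 and so only doubles.  With a larger max of 512 pages (e.g.
 * when bdi->io_pages raises the limit) a 4-page window quadruples while it
 * is below 512/16 = 32: 4 -> 16 -> 64 -> 128 -> 256 -> 512.
 */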

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                       |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator.  The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
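
/*
 * Worked example (illustrative): assume 4KiB pages and the default
 * ra_pages of 32.  A sequential reader issues 64KiB (16-page) reads and
 * misses at page 0:
 *
 *  1. page_cache_sync_readahead() -> ondemand_readahead(offset=0):
 *     initial window start=0, size=get_init_ra_size(16, 32)=32,
 *     async_size=32-16=16.  Pages 0-31 are read and the page at
 *     start+size-async_size = 16 is tagged PG_readahead.
 *  2. The reader reaches page 16, which still carries PG_readahead, so
 *     page_cache_async_readahead() -> ondemand_readahead(offset=16):
 *     offset matches start+size-async_size, so the window slides to
 *     start=32, size=get_next_ra_size()=32, async_size=32, and pages
 *     32-63 are read asynchronously while pages 17-31 are being consumed.
 *
 * From then on each PG_readahead hit keeps one full 32-page window in
 * flight ahead of the reader.
 */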

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_hole(mapping, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
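
/*
 * Worked example (illustrative): suppose max = 32, the application reads
 * req_size = 4 pages at offset = 100, and pages 70-99 are already cached
 * while page 69 is not.  count_history_pages() finds the previous hole at
 * 69 and reports 99 - 69 = 30 history pages.  Since 30 > 4 this looks like
 * an ongoing sequential stream, so the window becomes start = 100,
 * size = min(30 + 4, 32) = 32, async_size = 1.  Had 4 or fewer history
 * pages been cached, the read would have been treated as random instead.
 */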

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	pgoff_t prev_offset;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_hole(mapping, offset + 1, max_pages);
		rcu_read_unlock();

		if (!start || start - offset > max_pages)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss has
 * happened: it will submit the read.  The readahead logic may decide to
 * piggyback more pages onto the read request if access patterns suggest it
 * will improve performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(mapping->host))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
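
/*
 * A condensed, illustrative sketch of how a buffered-read path typically
 * drives the two entry points above (compare the read loop in mm/filemap.c);
 * variable names are hypothetical and error handling is omitted:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		// cache miss: start synchronous readahead, then look again
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 *	if (page && PageReadahead(page))
 *		// marker hit: keep the pipeline full with async readahead
 *		page_cache_async_readahead(mapping, &filp->f_ra, filp,
 *					   page, index, last_index - index);
 */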

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	/*
	 * Readahead doesn't make sense for DAX inodes, but we don't want it
	 * to report a failure either.  Instead, we just return success and
	 * don't do any work.
	 */
	if (dax_mapping(mapping))
		return 0;

	return force_page_cache_readahead(mapping, filp, index, nr);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}
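
/*
 * Worked example (illustrative) of the page arithmetic above, assuming
 * 4KiB pages: readahead(fd, 5000, 10000) covers bytes 5000-14999, so
 * start = 5000 >> 12 = 1, end = 14999 >> 12 = 3 and len = 3 pages, i.e.
 * pages 1-3 are read ahead even though the byte range only partially
 * overlaps pages 1 and 3.
 */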