/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/android_fs.h>

EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_end_enabled() &&
	    (bio_data_dir(bio) == READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL)
			trace_android_fs_dataread_end(first_page->mapping->host,
						      page_offset(first_page),
						      bio->bi_iter.bi_size);
	}

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
	}

	bio_put(bio);
}

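/*
 * Hook up the completion handler, emit the optional Android read-tracing
 * event, guard against I/O past end-of-device and submit the BIO.  Always
 * returns NULL so that callers can write "bio = mpage_bio_submit(...)" to
 * submit and reset their local bio pointer in one step.
 */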
static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
#ifdef CONFIG_AMLOGIC_VMAP
			trace_android_fs_dataread_wrap(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size);
#else
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
#endif
		}
	}
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}

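/*
 * Allocate a BIO for nr_vecs pages starting at first_sector.  If the caller
 * is itself in memory reclaim (PF_MEMALLOC) and the allocation fails, retry
 * with progressively fewer vecs rather than failing outright.
 */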
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * Support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting them for IO
 * when the blocks turn out not to be contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
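/*
 * Calling convention (as used by mpage_readpage/mpage_readpages below): the
 * caller zero-initialises map_bh->b_state, map_bh->b_size and
 * *first_logical_block before the first call, then passes the same
 * buffer_head and counters back in for each subsequent page, so that a
 * multi-block mapping returned by one get_block() call can be reused for
 * later pages without calling get_block() again.
 */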
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);

alloc_new:
	if (bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
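
/*
 * Usage sketch: a filesystem normally calls mpage_readpages() from its
 * address_space_operations ->readpages() method, passing its own get_block
 * routine.  ext2, for instance, does roughly:
 *
 *	static int ext2_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					ext2_get_block);
 *	}
 *
 * To get the request ordering described above, the filesystem's get_block()
 * can call set_buffer_boundary() on the result buffer_head when mapping the
 * next block would require reading a nearby metadata (e.g. indirect) block.
 */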

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block, gfp);
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
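
/*
 * Usage sketch: the matching ->readpage() wiring is equally thin; in ext2 it
 * is roughly:
 *
 *	static int ext2_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, ext2_get_block);
 *	}
 */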

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We cannot drop the bh if the page is not uptodate: a concurrent
	 * readpage would then fail to serialize with the bh and would read
	 * from disk before our data reaches the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_io(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
					WRITE_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
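
/*
 * Usage sketch: like the read side, filesystems typically call
 * mpage_writepages() from their ->writepages() method with their own
 * get_block routine; ext2 does roughly:
 *
 *	static int ext2_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, ext2_get_block);
 *	}
 */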

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				WRITE_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
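
/*
 * Usage sketch (hypothetical names): a filesystem wanting a direct-to-BIO
 * ->writepage() without going through mpage_writepages() could do:
 *
 *	static int foo_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, foo_get_block, wbc);
 *	}
 *
 * where foo_writepage/foo_get_block are illustrative placeholders, not
 * functions defined anywhere in the tree.
 */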