#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap = NULL;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		get_page(page);

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}

	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration, or crossing a vma boundary */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem.  So it does not
 * have the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	int ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
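
/*
 * Illustrative sketch (not part of this file): the futex-style pattern
 * fixup_user_fault() exists for.  The names "uaddr" and "val" and the
 * surrounding control flow are hypothetical; only the calls shown are
 * this file's API.
 *
 *	u32 val;
 *	bool unlocked = false;
 *	int ret;
 *
 *	pagefault_disable();
 *	ret = __get_user(val, (u32 __user *)uaddr);
 *	pagefault_enable();
 *	if (ret) {
 *		down_read(&current->mm->mmap_sem);
 *		ret = fixup_user_fault(current, current->mm, uaddr,
 *				       FAULT_FLAG_WRITE, &unlocked);
 *		up_read(&current->mm->mmap_sem);
 *		if (ret)
 *			return ret;
 *		// the fault is resolved; retry the atomic access
 *	}
 */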

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 * with:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(start, nr_pages, gup_flags, pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked, true,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
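
/*
 * A more concrete sketch of the conversion described above (hypothetical
 * caller code, not from this file; error handling abbreviated):
 *
 *	struct page *page;
 *	int locked = 1;
 *	long ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages_locked(start, 1, FOLL_WRITE, &page, &locked);
 *	if (locked)
 *		up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		// ... access the page contents ...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */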

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows
 * additional gup_flags to be passed as the last parameter (like
 * FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages).  "FOLL_GET"
 * is set implicitly when a non-NULL "pages" array is passed.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       struct page **pages, unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
				      &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 * with:
 *
 *      get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
 *
 * It is functionally equivalent to get_user_pages_fast, so
 * get_user_pages_fast should be used instead where possible; note that
 * get_user_pages_fast only operates on current->mm and cannot pass
 * gup_flags such as FOLL_FORCE (it only takes a "write" argument).
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
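
/*
 * Sketch (hypothetical caller): with the unlocked variant the mmap_sem
 * handling collapses into the call itself, e.g.:
 *
 *	long pinned = get_user_pages_unlocked(start, nr_pages, pages,
 *					      FOLL_WRITE);
 */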

/**
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       NULL, false,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
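
/*
 * Hypothetical sketch of a remote caller (e.g. ptrace-style access to
 * another process's memory; not part of this file).  Here "tsk" and "mm"
 * refer to the target task, and the mmap_sem taken is the target mm's:
 *
 *	struct page *page;
 *	long ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = get_user_pages_remote(tsk, mm, addr, 1, FOLL_WRITE,
 *				    &page, NULL);
 *	up_read(&mm->mmap_sem);
 *	if (ret == 1) {
 *		// ... copy data in via kmap(page) ...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */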

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's.  We also
 * obviously don't pass FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL, false,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
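
/*
 * Minimal sketch of the classic usage described in the comment above
 * (hypothetical caller; "pages" is an array sized nr_pages, error
 * handling abbreviated): pin, use, dirty, release.
 *
 *	long i, pinned;
 *
 *	down_read(&current->mm->mmap_sem);
 *	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (pinned < 0)
 *		return pinned;
 *	for (i = 0; i < pinned; i++) {
 *		// ... e.g. DMA to/from pages[i] ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */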

#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			     unsigned int gup_flags, struct page **pages,
			     struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: pointer used to signal that mmap_sem was dropped (see
 *               below), or NULL
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up the pages containing page
 * tables that belong to more than one mm_user, then free them via an
 * rcu_sched callback. Disabling interrupts will allow the fast_gup walker
 * to both block the rcu_sched callback, and an IPI that we broadcast for
 * splitting THPs (which is a relatively rare event). The code below adopts
 * this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *	pages containing page tables.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		if (!arch_pte_access_permitted(pte, write))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
			     pmd_devmap(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can use a different format for
			 * a hugetlbfs pmd than for a THP pmd.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
					      write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
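
/*
 * Usage sketch (hypothetical caller, not from this file): no mmap_sem is
 * taken by the caller, but a short pin count must still be handled, as
 * the fallback logic above shows.
 *
 *	int i, pinned;
 *
 *	pinned = get_user_pages_fast(start, nr_pages, 1, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	for (i = 0; i < pinned; i++) {
 *		// ... access pages[i] ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *	// if (pinned < nr_pages), only part of the range was pinned
 */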

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
1587 |