1 | /* |
2 | * Copyright (C) 2009 Red Hat, Inc. |
3 | * |
4 | * This work is licensed under the terms of the GNU GPL, version 2. See |
5 | * the COPYING file in the top-level directory. |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | |
10 | #include <linux/mm.h> |
11 | #include <linux/sched.h> |
12 | #include <linux/highmem.h> |
13 | #include <linux/hugetlb.h> |
14 | #include <linux/mmu_notifier.h> |
15 | #include <linux/rmap.h> |
16 | #include <linux/swap.h> |
17 | #include <linux/shrinker.h> |
18 | #include <linux/mm_inline.h> |
19 | #include <linux/swapops.h> |
20 | #include <linux/dax.h> |
21 | #include <linux/khugepaged.h> |
22 | #include <linux/freezer.h> |
23 | #include <linux/pfn_t.h> |
24 | #include <linux/mman.h> |
25 | #include <linux/memremap.h> |
26 | #include <linux/pagemap.h> |
27 | #include <linux/debugfs.h> |
28 | #include <linux/migrate.h> |
29 | #include <linux/hashtable.h> |
30 | #include <linux/userfaultfd_k.h> |
31 | #include <linux/page_idle.h> |
32 | #include <linux/shmem_fs.h> |
33 | |
34 | #include <asm/tlb.h> |
35 | #include <asm/pgalloc.h> |
36 | #include "internal.h" |
37 | |
38 | /* |
39 |  * By default, transparent hugepage support is disabled to avoid the risk of |
40 |  * increasing the memory footprint of applications without a guaranteed |
41 |  * benefit. When transparent hugepage support is enabled, it applies to all |
42 |  * mappings, and khugepaged scans all mappings. |
43 | * Defrag is invoked by khugepaged hugepage allocations and by page faults |
44 | * for all hugepage allocations. |
45 | */ |
46 | unsigned long transparent_hugepage_flags __read_mostly = |
47 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS |
48 | (1<<TRANSPARENT_HUGEPAGE_FLAG)| |
49 | #endif |
50 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE |
51 | (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| |
52 | #endif |
53 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| |
54 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| |
55 | (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); |
56 | |
57 | static struct shrinker deferred_split_shrinker; |
58 | |
59 | static atomic_t huge_zero_refcount; |
60 | struct page *huge_zero_page __read_mostly; |
61 | |
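/*
 * The huge zero page is allocated lazily on first use and shared by all mms.
 * get_huge_zero_page() returns it with an elevated refcount; the first
 * allocation sets the count to 2 so that one reference stays held until the
 * shrinker below releases it under memory pressure.
 */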
62 | static struct page *get_huge_zero_page(void) |
63 | { |
64 | struct page *zero_page; |
65 | retry: |
66 | if (likely(atomic_inc_not_zero(&huge_zero_refcount))) |
67 | return READ_ONCE(huge_zero_page); |
68 | |
69 | zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, |
70 | HPAGE_PMD_ORDER); |
71 | if (!zero_page) { |
72 | count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); |
73 | return NULL; |
74 | } |
75 | count_vm_event(THP_ZERO_PAGE_ALLOC); |
76 | preempt_disable(); |
77 | if (cmpxchg(&huge_zero_page, NULL, zero_page)) { |
78 | preempt_enable(); |
79 | __free_pages(zero_page, compound_order(zero_page)); |
80 | goto retry; |
81 | } |
82 | |
83 | 	/* We take an additional reference here; it will be put back by the shrinker. */ |
84 | atomic_set(&huge_zero_refcount, 2); |
85 | preempt_enable(); |
86 | return READ_ONCE(huge_zero_page); |
87 | } |
88 | |
89 | static void put_huge_zero_page(void) |
90 | { |
91 | /* |
92 | * Counter should never go to zero here. Only shrinker can put |
93 | * last reference. |
94 | */ |
95 | BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); |
96 | } |
97 | |
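/*
 * Per-mm wrappers: the first call for an mm takes a reference on the huge
 * zero page and records it with MMF_HUGE_ZERO_PAGE, so later faults in the
 * same mm reuse it without touching the global refcount;
 * mm_put_huge_zero_page() drops that per-mm reference.
 */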
98 | struct page *mm_get_huge_zero_page(struct mm_struct *mm) |
99 | { |
100 | if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) |
101 | return READ_ONCE(huge_zero_page); |
102 | |
103 | if (!get_huge_zero_page()) |
104 | return NULL; |
105 | |
106 | if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) |
107 | put_huge_zero_page(); |
108 | |
109 | return READ_ONCE(huge_zero_page); |
110 | } |
111 | |
112 | void mm_put_huge_zero_page(struct mm_struct *mm) |
113 | { |
114 | if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) |
115 | put_huge_zero_page(); |
116 | } |
117 | |
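/*
 * Shrinker for the huge zero page: count_objects reports HPAGE_PMD_NR
 * reclaimable pages once only the shrinker's own reference is left, and
 * scan_objects then drops that last reference and frees the page.
 */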
118 | static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, |
119 | struct shrink_control *sc) |
120 | { |
121 | 	/* we can free the zero page only if the last reference remains */ |
122 | return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; |
123 | } |
124 | |
125 | static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, |
126 | struct shrink_control *sc) |
127 | { |
128 | if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { |
129 | struct page *zero_page = xchg(&huge_zero_page, NULL); |
130 | BUG_ON(zero_page == NULL); |
131 | __free_pages(zero_page, compound_order(zero_page)); |
132 | return HPAGE_PMD_NR; |
133 | } |
134 | |
135 | return 0; |
136 | } |
137 | |
138 | static struct shrinker huge_zero_page_shrinker = { |
139 | .count_objects = shrink_huge_zero_page_count, |
140 | .scan_objects = shrink_huge_zero_page_scan, |
141 | .seeks = DEFAULT_SEEKS, |
142 | }; |
143 | |
144 | #ifdef CONFIG_SYSFS |
145 | |
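/*
 * Shared parser for the three-way sysfs knobs below ("enabled" and "defrag"):
 * the string written selects exactly one of the passed flags ("always",
 * "defer", "madvise") or clears them all ("never").  "defer" is rejected for
 * knobs where the enabled and deferred flags are the same bit.
 */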
146 | static ssize_t triple_flag_store(struct kobject *kobj, |
147 | struct kobj_attribute *attr, |
148 | const char *buf, size_t count, |
149 | enum transparent_hugepage_flag enabled, |
150 | enum transparent_hugepage_flag deferred, |
151 | enum transparent_hugepage_flag req_madv) |
152 | { |
153 | if (!memcmp("defer", buf, |
154 | min(sizeof("defer")-1, count))) { |
155 | if (enabled == deferred) |
156 | return -EINVAL; |
157 | clear_bit(enabled, &transparent_hugepage_flags); |
158 | clear_bit(req_madv, &transparent_hugepage_flags); |
159 | set_bit(deferred, &transparent_hugepage_flags); |
160 | } else if (!memcmp("always", buf, |
161 | min(sizeof("always")-1, count))) { |
162 | clear_bit(deferred, &transparent_hugepage_flags); |
163 | clear_bit(req_madv, &transparent_hugepage_flags); |
164 | set_bit(enabled, &transparent_hugepage_flags); |
165 | } else if (!memcmp("madvise", buf, |
166 | min(sizeof("madvise")-1, count))) { |
167 | clear_bit(enabled, &transparent_hugepage_flags); |
168 | clear_bit(deferred, &transparent_hugepage_flags); |
169 | set_bit(req_madv, &transparent_hugepage_flags); |
170 | } else if (!memcmp("never", buf, |
171 | min(sizeof("never")-1, count))) { |
172 | clear_bit(enabled, &transparent_hugepage_flags); |
173 | clear_bit(req_madv, &transparent_hugepage_flags); |
174 | clear_bit(deferred, &transparent_hugepage_flags); |
175 | } else |
176 | return -EINVAL; |
177 | |
178 | return count; |
179 | } |
180 | |
181 | static ssize_t enabled_show(struct kobject *kobj, |
182 | struct kobj_attribute *attr, char *buf) |
183 | { |
184 | if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) |
185 | return sprintf(buf, "[always] madvise never\n"); |
186 | else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) |
187 | return sprintf(buf, "always [madvise] never\n"); |
188 | else |
189 | return sprintf(buf, "always madvise [never]\n"); |
190 | } |
191 | |
192 | static ssize_t enabled_store(struct kobject *kobj, |
193 | struct kobj_attribute *attr, |
194 | const char *buf, size_t count) |
195 | { |
196 | ssize_t ret; |
197 | |
198 | ret = triple_flag_store(kobj, attr, buf, count, |
199 | TRANSPARENT_HUGEPAGE_FLAG, |
200 | TRANSPARENT_HUGEPAGE_FLAG, |
201 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); |
202 | |
203 | if (ret > 0) { |
204 | int err = start_stop_khugepaged(); |
205 | if (err) |
206 | ret = err; |
207 | } |
208 | |
209 | return ret; |
210 | } |
211 | static struct kobj_attribute enabled_attr = |
212 | __ATTR(enabled, 0644, enabled_show, enabled_store); |
213 | |
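/*
 * Illustrative sysfs usage (values as produced by enabled_show() above):
 *
 *   # cat /sys/kernel/mm/transparent_hugepage/enabled
 *   always [madvise] never
 *   # echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * Writes go through enabled_store(), which also starts or stops khugepaged
 * as needed.
 */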
214 | ssize_t single_hugepage_flag_show(struct kobject *kobj, |
215 | struct kobj_attribute *attr, char *buf, |
216 | enum transparent_hugepage_flag flag) |
217 | { |
218 | return sprintf(buf, "%d\n", |
219 | !!test_bit(flag, &transparent_hugepage_flags)); |
220 | } |
221 | |
222 | ssize_t single_hugepage_flag_store(struct kobject *kobj, |
223 | struct kobj_attribute *attr, |
224 | const char *buf, size_t count, |
225 | enum transparent_hugepage_flag flag) |
226 | { |
227 | unsigned long value; |
228 | int ret; |
229 | |
230 | ret = kstrtoul(buf, 10, &value); |
231 | if (ret < 0) |
232 | return ret; |
233 | if (value > 1) |
234 | return -EINVAL; |
235 | |
236 | if (value) |
237 | set_bit(flag, &transparent_hugepage_flags); |
238 | else |
239 | clear_bit(flag, &transparent_hugepage_flags); |
240 | |
241 | return count; |
242 | } |
243 | |
244 | /* |
245 | * Currently defrag only disables __GFP_NOWAIT for allocation. A blind |
246 |  * __GFP_REPEAT is too aggressive; it's never worth swapping tons of |
247 | * memory just to allocate one more hugepage. |
248 | */ |
249 | static ssize_t defrag_show(struct kobject *kobj, |
250 | struct kobj_attribute *attr, char *buf) |
251 | { |
252 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) |
253 | return sprintf(buf, "[always] defer madvise never\n"); |
254 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) |
255 | return sprintf(buf, "always [defer] madvise never\n"); |
256 | else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) |
257 | return sprintf(buf, "always defer [madvise] never\n"); |
258 | else |
259 | return sprintf(buf, "always defer madvise [never]\n"); |
260 | |
261 | } |
262 | static ssize_t defrag_store(struct kobject *kobj, |
263 | struct kobj_attribute *attr, |
264 | const char *buf, size_t count) |
265 | { |
266 | return triple_flag_store(kobj, attr, buf, count, |
267 | TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, |
268 | TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, |
269 | TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); |
270 | } |
271 | static struct kobj_attribute defrag_attr = |
272 | __ATTR(defrag, 0644, defrag_show, defrag_store); |
273 | |
274 | static ssize_t use_zero_page_show(struct kobject *kobj, |
275 | struct kobj_attribute *attr, char *buf) |
276 | { |
277 | return single_hugepage_flag_show(kobj, attr, buf, |
278 | TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); |
279 | } |
280 | static ssize_t use_zero_page_store(struct kobject *kobj, |
281 | struct kobj_attribute *attr, const char *buf, size_t count) |
282 | { |
283 | return single_hugepage_flag_store(kobj, attr, buf, count, |
284 | TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); |
285 | } |
286 | static struct kobj_attribute use_zero_page_attr = |
287 | __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); |
288 | #ifdef CONFIG_DEBUG_VM |
289 | static ssize_t debug_cow_show(struct kobject *kobj, |
290 | struct kobj_attribute *attr, char *buf) |
291 | { |
292 | return single_hugepage_flag_show(kobj, attr, buf, |
293 | TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); |
294 | } |
295 | static ssize_t debug_cow_store(struct kobject *kobj, |
296 | struct kobj_attribute *attr, |
297 | const char *buf, size_t count) |
298 | { |
299 | return single_hugepage_flag_store(kobj, attr, buf, count, |
300 | TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); |
301 | } |
302 | static struct kobj_attribute debug_cow_attr = |
303 | __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); |
304 | #endif /* CONFIG_DEBUG_VM */ |
305 | |
306 | static struct attribute *hugepage_attr[] = { |
307 | &enabled_attr.attr, |
308 | &defrag_attr.attr, |
309 | &use_zero_page_attr.attr, |
310 | #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) |
311 | &shmem_enabled_attr.attr, |
312 | #endif |
313 | #ifdef CONFIG_DEBUG_VM |
314 | &debug_cow_attr.attr, |
315 | #endif |
316 | NULL, |
317 | }; |
318 | |
319 | static struct attribute_group hugepage_attr_group = { |
320 | .attrs = hugepage_attr, |
321 | }; |
322 | |
323 | static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) |
324 | { |
325 | int err; |
326 | |
327 | *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); |
328 | if (unlikely(!*hugepage_kobj)) { |
329 | pr_err("failed to create transparent hugepage kobject\n"); |
330 | return -ENOMEM; |
331 | } |
332 | |
333 | err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); |
334 | if (err) { |
335 | pr_err("failed to register transparent hugepage group\n"); |
336 | goto delete_obj; |
337 | } |
338 | |
339 | err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); |
340 | if (err) { |
341 | pr_err("failed to register transparent hugepage group\n"); |
342 | goto remove_hp_group; |
343 | } |
344 | |
345 | return 0; |
346 | |
347 | remove_hp_group: |
348 | sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); |
349 | delete_obj: |
350 | kobject_put(*hugepage_kobj); |
351 | return err; |
352 | } |
353 | |
354 | static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) |
355 | { |
356 | sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); |
357 | sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); |
358 | kobject_put(hugepage_kobj); |
359 | } |
360 | #else |
361 | static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) |
362 | { |
363 | return 0; |
364 | } |
365 | |
366 | static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) |
367 | { |
368 | } |
369 | #endif /* CONFIG_SYSFS */ |
370 | |
371 | static int __init hugepage_init(void) |
372 | { |
373 | int err; |
374 | struct kobject *hugepage_kobj; |
375 | |
376 | if (!has_transparent_hugepage()) { |
377 | transparent_hugepage_flags = 0; |
378 | return -EINVAL; |
379 | } |
380 | |
381 | /* |
382 | * hugepages can't be allocated by the buddy allocator |
383 | */ |
384 | MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER); |
385 | /* |
386 | * we use page->mapping and page->index in second tail page |
387 | * as list_head: assuming THP order >= 2 |
388 | */ |
389 | MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2); |
390 | |
391 | err = hugepage_init_sysfs(&hugepage_kobj); |
392 | if (err) |
393 | goto err_sysfs; |
394 | |
395 | err = khugepaged_init(); |
396 | if (err) |
397 | goto err_slab; |
398 | |
399 | err = register_shrinker(&huge_zero_page_shrinker); |
400 | if (err) |
401 | goto err_hzp_shrinker; |
402 | err = register_shrinker(&deferred_split_shrinker); |
403 | if (err) |
404 | goto err_split_shrinker; |
405 | |
406 | /* |
407 | * By default disable transparent hugepages on smaller systems, |
408 | * where the extra memory used could hurt more than TLB overhead |
409 | * is likely to save. The admin can still enable it through /sys. |
410 | */ |
411 | if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { |
412 | transparent_hugepage_flags = 0; |
413 | return 0; |
414 | } |
415 | |
416 | err = start_stop_khugepaged(); |
417 | if (err) |
418 | goto err_khugepaged; |
419 | |
420 | return 0; |
421 | err_khugepaged: |
422 | unregister_shrinker(&deferred_split_shrinker); |
423 | err_split_shrinker: |
424 | unregister_shrinker(&huge_zero_page_shrinker); |
425 | err_hzp_shrinker: |
426 | khugepaged_destroy(); |
427 | err_slab: |
428 | hugepage_exit_sysfs(hugepage_kobj); |
429 | err_sysfs: |
430 | return err; |
431 | } |
432 | subsys_initcall(hugepage_init); |
433 | |
434 | static int __init setup_transparent_hugepage(char *str) |
435 | { |
436 | int ret = 0; |
437 | if (!str) |
438 | goto out; |
439 | if (!strcmp(str, "always")) { |
440 | set_bit(TRANSPARENT_HUGEPAGE_FLAG, |
441 | &transparent_hugepage_flags); |
442 | clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
443 | &transparent_hugepage_flags); |
444 | ret = 1; |
445 | } else if (!strcmp(str, "madvise")) { |
446 | clear_bit(TRANSPARENT_HUGEPAGE_FLAG, |
447 | &transparent_hugepage_flags); |
448 | set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
449 | &transparent_hugepage_flags); |
450 | ret = 1; |
451 | } else if (!strcmp(str, "never")) { |
452 | clear_bit(TRANSPARENT_HUGEPAGE_FLAG, |
453 | &transparent_hugepage_flags); |
454 | clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
455 | &transparent_hugepage_flags); |
456 | ret = 1; |
457 | } |
458 | out: |
459 | if (!ret) |
460 | pr_warn("transparent_hugepage= cannot parse, ignored\n"); |
461 | return ret; |
462 | } |
463 | __setup("transparent_hugepage=", setup_transparent_hugepage); |
464 | |
465 | pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) |
466 | { |
467 | if (likely(vma->vm_flags & VM_WRITE)) |
468 | pmd = pmd_mkwrite(pmd); |
469 | return pmd; |
470 | } |
471 | |
472 | static inline struct list_head *page_deferred_list(struct page *page) |
473 | { |
474 | /* |
475 | * ->lru in the tail pages is occupied by compound_head. |
476 | * Let's use ->mapping + ->index in the second tail page as list_head. |
477 | */ |
478 | return (struct list_head *)&page[2].mapping; |
479 | } |
480 | |
481 | void prep_transhuge_page(struct page *page) |
482 | { |
483 | /* |
484 | 	 * we use page->mapping and page->index in the second tail page |
485 | * as list_head: assuming THP order >= 2 |
486 | */ |
487 | |
488 | INIT_LIST_HEAD(page_deferred_list(page)); |
489 | set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); |
490 | } |
491 | |
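/*
 * Helper for thp_get_unmapped_area(): request a mapping padded by one extra
 * @size, then shift the returned address so that the virtual address and the
 * file offset are congruent modulo @size, which is what later allows the
 * range to be mapped with PMD entries.  Purely as an illustration: with
 * size = 2MB, off = 1MB and a 2MB-aligned addr from get_unmapped_area(),
 * the final "addr += (off - addr) & (size - 1)" bumps addr by 1MB, making
 * (addr - off) a multiple of 2MB.
 */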
492 | unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len, |
493 | loff_t off, unsigned long flags, unsigned long size) |
494 | { |
495 | unsigned long addr; |
496 | loff_t off_end = off + len; |
497 | loff_t off_align = round_up(off, size); |
498 | unsigned long len_pad; |
499 | |
500 | if (off_end <= off_align || (off_end - off_align) < size) |
501 | return 0; |
502 | |
503 | len_pad = len + size; |
504 | if (len_pad < len || (off + len_pad) < off) |
505 | return 0; |
506 | |
507 | addr = current->mm->get_unmapped_area(filp, 0, len_pad, |
508 | off >> PAGE_SHIFT, flags); |
509 | if (IS_ERR_VALUE(addr)) |
510 | return 0; |
511 | |
512 | addr += (off - addr) & (size - 1); |
513 | return addr; |
514 | } |
515 | |
516 | unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, |
517 | unsigned long len, unsigned long pgoff, unsigned long flags) |
518 | { |
519 | loff_t off = (loff_t)pgoff << PAGE_SHIFT; |
520 | |
521 | if (addr) |
522 | goto out; |
523 | if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) |
524 | goto out; |
525 | |
526 | addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE); |
527 | if (addr) |
528 | return addr; |
529 | |
530 | out: |
531 | return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); |
532 | } |
533 | EXPORT_SYMBOL_GPL(thp_get_unmapped_area); |
534 | |
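/*
 * Slow path of the anonymous huge-page fault: charge the page to the memcg,
 * preallocate a pte page table to deposit, clear and map the huge page under
 * the pmd lock, or hand the fault to userfaultfd if the range is registered
 * as missing.
 */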
535 | static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, |
536 | gfp_t gfp) |
537 | { |
538 | struct vm_area_struct *vma = fe->vma; |
539 | struct mem_cgroup *memcg; |
540 | pgtable_t pgtable; |
541 | unsigned long haddr = fe->address & HPAGE_PMD_MASK; |
542 | |
543 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
544 | |
545 | if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg, |
546 | true)) { |
547 | put_page(page); |
548 | count_vm_event(THP_FAULT_FALLBACK); |
549 | return VM_FAULT_FALLBACK; |
550 | } |
551 | |
552 | pgtable = pte_alloc_one(vma->vm_mm, haddr); |
553 | if (unlikely(!pgtable)) { |
554 | mem_cgroup_cancel_charge(page, memcg, true); |
555 | put_page(page); |
556 | return VM_FAULT_OOM; |
557 | } |
558 | |
559 | clear_huge_page(page, haddr, HPAGE_PMD_NR); |
560 | /* |
561 | * The memory barrier inside __SetPageUptodate makes sure that |
562 | * clear_huge_page writes become visible before the set_pmd_at() |
563 | * write. |
564 | */ |
565 | __SetPageUptodate(page); |
566 | |
567 | fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); |
568 | if (unlikely(!pmd_none(*fe->pmd))) { |
569 | spin_unlock(fe->ptl); |
570 | mem_cgroup_cancel_charge(page, memcg, true); |
571 | put_page(page); |
572 | pte_free(vma->vm_mm, pgtable); |
573 | } else { |
574 | pmd_t entry; |
575 | |
576 | /* Deliver the page fault to userland */ |
577 | if (userfaultfd_missing(vma)) { |
578 | int ret; |
579 | |
580 | spin_unlock(fe->ptl); |
581 | mem_cgroup_cancel_charge(page, memcg, true); |
582 | put_page(page); |
583 | pte_free(vma->vm_mm, pgtable); |
584 | ret = handle_userfault(fe, VM_UFFD_MISSING); |
585 | VM_BUG_ON(ret & VM_FAULT_FALLBACK); |
586 | return ret; |
587 | } |
588 | |
589 | entry = mk_huge_pmd(page, vma->vm_page_prot); |
590 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
591 | page_add_new_anon_rmap(page, vma, haddr, true); |
592 | mem_cgroup_commit_charge(page, memcg, false, true); |
593 | lru_cache_add_active_or_unevictable(page, vma); |
594 | pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable); |
595 | set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); |
596 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
597 | atomic_long_inc(&vma->vm_mm->nr_ptes); |
598 | spin_unlock(fe->ptl); |
599 | count_vm_event(THP_FAULT_ALLOC); |
600 | } |
601 | |
602 | return 0; |
603 | } |
604 | |
605 | /* |
606 | * If THP defrag is set to always then directly reclaim/compact as necessary |
607 | * If set to defer then do only background reclaim/compact and defer to khugepaged |
608 | * If set to madvise and the VMA is flagged then directly reclaim/compact |
609 | * When direct reclaim/compact is allowed, don't retry except for flagged VMA's |
610 |  * When direct reclaim/compact is allowed, don't retry except for flagged VMAs |
611 | static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) |
612 | { |
613 | bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); |
614 | |
615 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, |
616 | &transparent_hugepage_flags) && vma_madvised) |
617 | return GFP_TRANSHUGE; |
618 | else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, |
619 | &transparent_hugepage_flags)) |
620 | return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; |
621 | else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, |
622 | &transparent_hugepage_flags)) |
623 | return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); |
624 | |
625 | return GFP_TRANSHUGE_LIGHT; |
626 | } |
627 | |
628 | /* Caller must hold page table lock. */ |
629 | static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, |
630 | struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, |
631 | struct page *zero_page) |
632 | { |
633 | pmd_t entry; |
634 | if (!pmd_none(*pmd)) |
635 | return false; |
636 | entry = mk_pmd(zero_page, vma->vm_page_prot); |
637 | entry = pmd_mkhuge(entry); |
638 | if (pgtable) |
639 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
640 | set_pmd_at(mm, haddr, pmd, entry); |
641 | atomic_long_inc(&mm->nr_ptes); |
642 | return true; |
643 | } |
644 | |
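/*
 * Anonymous huge-page fault entry point.  Read faults may be satisfied with
 * the huge zero page when use_zero_page is enabled; otherwise a THP is
 * allocated according to the defrag policy above, and the caller falls back
 * to ordinary pages on VM_FAULT_FALLBACK.
 */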
645 | int do_huge_pmd_anonymous_page(struct fault_env *fe) |
646 | { |
647 | struct vm_area_struct *vma = fe->vma; |
648 | gfp_t gfp; |
649 | struct page *page; |
650 | unsigned long haddr = fe->address & HPAGE_PMD_MASK; |
651 | |
652 | if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) |
653 | return VM_FAULT_FALLBACK; |
654 | if (unlikely(anon_vma_prepare(vma))) |
655 | return VM_FAULT_OOM; |
656 | if (unlikely(khugepaged_enter(vma, vma->vm_flags))) |
657 | return VM_FAULT_OOM; |
658 | if (!(fe->flags & FAULT_FLAG_WRITE) && |
659 | !mm_forbids_zeropage(vma->vm_mm) && |
660 | transparent_hugepage_use_zero_page()) { |
661 | pgtable_t pgtable; |
662 | struct page *zero_page; |
663 | bool set; |
664 | int ret; |
665 | pgtable = pte_alloc_one(vma->vm_mm, haddr); |
666 | if (unlikely(!pgtable)) |
667 | return VM_FAULT_OOM; |
668 | zero_page = mm_get_huge_zero_page(vma->vm_mm); |
669 | if (unlikely(!zero_page)) { |
670 | pte_free(vma->vm_mm, pgtable); |
671 | count_vm_event(THP_FAULT_FALLBACK); |
672 | return VM_FAULT_FALLBACK; |
673 | } |
674 | fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); |
675 | ret = 0; |
676 | set = false; |
677 | if (pmd_none(*fe->pmd)) { |
678 | if (userfaultfd_missing(vma)) { |
679 | spin_unlock(fe->ptl); |
680 | ret = handle_userfault(fe, VM_UFFD_MISSING); |
681 | VM_BUG_ON(ret & VM_FAULT_FALLBACK); |
682 | } else { |
683 | set_huge_zero_page(pgtable, vma->vm_mm, vma, |
684 | haddr, fe->pmd, zero_page); |
685 | spin_unlock(fe->ptl); |
686 | set = true; |
687 | } |
688 | } else |
689 | spin_unlock(fe->ptl); |
690 | if (!set) |
691 | pte_free(vma->vm_mm, pgtable); |
692 | return ret; |
693 | } |
694 | gfp = alloc_hugepage_direct_gfpmask(vma); |
695 | page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); |
696 | if (unlikely(!page)) { |
697 | count_vm_event(THP_FAULT_FALLBACK); |
698 | return VM_FAULT_FALLBACK; |
699 | } |
700 | prep_transhuge_page(page); |
701 | return __do_huge_pmd_anonymous_page(fe, page, gfp); |
702 | } |
703 | |
704 | static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
705 | pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write) |
706 | { |
707 | struct mm_struct *mm = vma->vm_mm; |
708 | pmd_t entry; |
709 | spinlock_t *ptl; |
710 | |
711 | ptl = pmd_lock(mm, pmd); |
712 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); |
713 | if (pfn_t_devmap(pfn)) |
714 | entry = pmd_mkdevmap(entry); |
715 | if (write) { |
716 | entry = pmd_mkyoung(pmd_mkdirty(entry)); |
717 | entry = maybe_pmd_mkwrite(entry, vma); |
718 | } |
719 | set_pmd_at(mm, addr, pmd, entry); |
720 | update_mmu_cache_pmd(vma, addr, pmd); |
721 | spin_unlock(ptl); |
722 | } |
723 | |
724 | int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
725 | pmd_t *pmd, pfn_t pfn, bool write) |
726 | { |
727 | pgprot_t pgprot = vma->vm_page_prot; |
728 | /* |
729 | * If we had pmd_special, we could avoid all these restrictions, |
730 | * but we need to be consistent with PTEs and architectures that |
731 | * can't support a 'special' bit. |
732 | */ |
733 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); |
734 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == |
735 | (VM_PFNMAP|VM_MIXEDMAP)); |
736 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); |
737 | BUG_ON(!pfn_t_devmap(pfn)); |
738 | |
739 | if (addr < vma->vm_start || addr >= vma->vm_end) |
740 | return VM_FAULT_SIGBUS; |
741 | if (track_pfn_insert(vma, &pgprot, pfn)) |
742 | return VM_FAULT_SIGBUS; |
743 | insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); |
744 | return VM_FAULT_NOPAGE; |
745 | } |
746 | EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); |
747 | |
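/*
 * Mark the pmd young (and dirty for write access) to mimic the hardware
 * accessed/dirty updates a real access would have caused; used by the
 * follow_page paths below when FOLL_TOUCH is set.
 */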
748 | static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, |
749 | pmd_t *pmd, int flags) |
750 | { |
751 | pmd_t _pmd; |
752 | |
753 | _pmd = pmd_mkyoung(*pmd); |
754 | if (flags & FOLL_WRITE) |
755 | _pmd = pmd_mkdirty(_pmd); |
756 | if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, |
757 | pmd, _pmd, flags & FOLL_WRITE)) |
758 | update_mmu_cache_pmd(vma, addr, pmd); |
759 | } |
760 | |
761 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, |
762 | pmd_t *pmd, int flags) |
763 | { |
764 | unsigned long pfn = pmd_pfn(*pmd); |
765 | struct mm_struct *mm = vma->vm_mm; |
766 | struct dev_pagemap *pgmap; |
767 | struct page *page; |
768 | |
769 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
770 | |
771 | /* |
772 | * When we COW a devmap PMD entry, we split it into PTEs, so we should |
773 | * not be in this function with `flags & FOLL_COW` set. |
774 | */ |
775 | WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); |
776 | |
777 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
778 | return NULL; |
779 | |
780 | if (pmd_present(*pmd) && pmd_devmap(*pmd)) |
781 | /* pass */; |
782 | else |
783 | return NULL; |
784 | |
785 | if (flags & FOLL_TOUCH) |
786 | touch_pmd(vma, addr, pmd, flags); |
787 | |
788 | /* |
789 | * device mapped pages can only be returned if the |
790 | * caller will manage the page reference count. |
791 | */ |
792 | if (!(flags & FOLL_GET)) |
793 | return ERR_PTR(-EEXIST); |
794 | |
795 | pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; |
796 | pgmap = get_dev_pagemap(pfn, NULL); |
797 | if (!pgmap) |
798 | return ERR_PTR(-EFAULT); |
799 | page = pfn_to_page(pfn); |
800 | get_page(page); |
801 | put_dev_pagemap(pgmap); |
802 | |
803 | return page; |
804 | } |
805 | |
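/*
 * Called at fork() to duplicate a huge pmd into the child.  Only anonymous
 * pmds are copied (file-backed ones can simply be refaulted); the huge zero
 * pmd is re-pointed at the shared zero page, and normal THPs are mapped
 * read-only in both parent and child with rmap and RSS counters updated.
 */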
806 | int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
807 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
808 | struct vm_area_struct *vma) |
809 | { |
810 | spinlock_t *dst_ptl, *src_ptl; |
811 | struct page *src_page; |
812 | pmd_t pmd; |
813 | pgtable_t pgtable = NULL; |
814 | int ret = -ENOMEM; |
815 | |
816 | 	/* Skip if it can be refilled on fault */ |
817 | if (!vma_is_anonymous(vma)) |
818 | return 0; |
819 | |
820 | pgtable = pte_alloc_one(dst_mm, addr); |
821 | if (unlikely(!pgtable)) |
822 | goto out; |
823 | |
824 | dst_ptl = pmd_lock(dst_mm, dst_pmd); |
825 | src_ptl = pmd_lockptr(src_mm, src_pmd); |
826 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
827 | |
828 | ret = -EAGAIN; |
829 | pmd = *src_pmd; |
830 | if (unlikely(!pmd_trans_huge(pmd))) { |
831 | pte_free(dst_mm, pgtable); |
832 | goto out_unlock; |
833 | } |
834 | /* |
835 | 	 * When the page table lock is held, the huge zero pmd should not be |
836 | 	 * getting split, since we don't split the page itself, only the pmd |
837 | 	 * into a page table. |
838 | */ |
839 | if (is_huge_zero_pmd(pmd)) { |
840 | struct page *zero_page; |
841 | /* |
842 | * get_huge_zero_page() will never allocate a new page here, |
843 | * since we already have a zero page to copy. It just takes a |
844 | * reference. |
845 | */ |
846 | zero_page = mm_get_huge_zero_page(dst_mm); |
847 | set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, |
848 | zero_page); |
849 | ret = 0; |
850 | goto out_unlock; |
851 | } |
852 | |
853 | src_page = pmd_page(pmd); |
854 | VM_BUG_ON_PAGE(!PageHead(src_page), src_page); |
855 | get_page(src_page); |
856 | page_dup_rmap(src_page, true); |
857 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
858 | atomic_long_inc(&dst_mm->nr_ptes); |
859 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
860 | |
861 | pmdp_set_wrprotect(src_mm, addr, src_pmd); |
862 | pmd = pmd_mkold(pmd_wrprotect(pmd)); |
863 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
864 | |
865 | ret = 0; |
866 | out_unlock: |
867 | spin_unlock(src_ptl); |
868 | spin_unlock(dst_ptl); |
869 | out: |
870 | return ret; |
871 | } |
872 | |
873 | void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd) |
874 | { |
875 | pmd_t entry; |
876 | unsigned long haddr; |
877 | bool write = fe->flags & FAULT_FLAG_WRITE; |
878 | |
879 | fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd); |
880 | if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) |
881 | goto unlock; |
882 | |
883 | entry = pmd_mkyoung(orig_pmd); |
884 | if (write) |
885 | entry = pmd_mkdirty(entry); |
886 | haddr = fe->address & HPAGE_PMD_MASK; |
887 | if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry, write)) |
888 | update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd); |
889 | |
890 | unlock: |
891 | spin_unlock(fe->ptl); |
892 | } |
893 | |
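/*
 * COW fallback used when a replacement huge page cannot be allocated:
 * copy the THP into HPAGE_PMD_NR order-0 pages and remap the range with
 * ordinary ptes, leaving the original huge page to be unmapped and freed.
 */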
894 | static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, |
895 | struct page *page) |
896 | { |
897 | struct vm_area_struct *vma = fe->vma; |
898 | unsigned long haddr = fe->address & HPAGE_PMD_MASK; |
899 | struct mem_cgroup *memcg; |
900 | pgtable_t pgtable; |
901 | pmd_t _pmd; |
902 | int ret = 0, i; |
903 | struct page **pages; |
904 | unsigned long mmun_start; /* For mmu_notifiers */ |
905 | unsigned long mmun_end; /* For mmu_notifiers */ |
906 | |
907 | pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, |
908 | GFP_KERNEL); |
909 | if (unlikely(!pages)) { |
910 | ret |= VM_FAULT_OOM; |
911 | goto out; |
912 | } |
913 | |
914 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
915 | pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | |
916 | __GFP_OTHER_NODE, vma, |
917 | fe->address, page_to_nid(page)); |
918 | if (unlikely(!pages[i] || |
919 | mem_cgroup_try_charge(pages[i], vma->vm_mm, |
920 | GFP_KERNEL, &memcg, false))) { |
921 | if (pages[i]) |
922 | put_page(pages[i]); |
923 | while (--i >= 0) { |
924 | memcg = (void *)page_private(pages[i]); |
925 | set_page_private(pages[i], 0); |
926 | mem_cgroup_cancel_charge(pages[i], memcg, |
927 | false); |
928 | put_page(pages[i]); |
929 | } |
930 | kfree(pages); |
931 | ret |= VM_FAULT_OOM; |
932 | goto out; |
933 | } |
934 | set_page_private(pages[i], (unsigned long)memcg); |
935 | } |
936 | |
937 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
938 | copy_user_highpage(pages[i], page + i, |
939 | haddr + PAGE_SIZE * i, vma); |
940 | __SetPageUptodate(pages[i]); |
941 | cond_resched(); |
942 | } |
943 | |
944 | mmun_start = haddr; |
945 | mmun_end = haddr + HPAGE_PMD_SIZE; |
946 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); |
947 | |
948 | fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); |
949 | if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) |
950 | goto out_free_pages; |
951 | VM_BUG_ON_PAGE(!PageHead(page), page); |
952 | |
953 | pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); |
954 | /* leave pmd empty until pte is filled */ |
955 | |
956 | pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd); |
957 | pmd_populate(vma->vm_mm, &_pmd, pgtable); |
958 | |
959 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
960 | pte_t entry; |
961 | entry = mk_pte(pages[i], vma->vm_page_prot); |
962 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
963 | memcg = (void *)page_private(pages[i]); |
964 | set_page_private(pages[i], 0); |
965 | page_add_new_anon_rmap(pages[i], fe->vma, haddr, false); |
966 | mem_cgroup_commit_charge(pages[i], memcg, false, false); |
967 | lru_cache_add_active_or_unevictable(pages[i], vma); |
968 | fe->pte = pte_offset_map(&_pmd, haddr); |
969 | VM_BUG_ON(!pte_none(*fe->pte)); |
970 | set_pte_at(vma->vm_mm, haddr, fe->pte, entry); |
971 | pte_unmap(fe->pte); |
972 | } |
973 | kfree(pages); |
974 | |
975 | smp_wmb(); /* make pte visible before pmd */ |
976 | pmd_populate(vma->vm_mm, fe->pmd, pgtable); |
977 | page_remove_rmap(page, true); |
978 | spin_unlock(fe->ptl); |
979 | |
980 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
981 | |
982 | ret |= VM_FAULT_WRITE; |
983 | put_page(page); |
984 | |
985 | out: |
986 | return ret; |
987 | |
988 | out_free_pages: |
989 | spin_unlock(fe->ptl); |
990 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
991 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
992 | memcg = (void *)page_private(pages[i]); |
993 | set_page_private(pages[i], 0); |
994 | mem_cgroup_cancel_charge(pages[i], memcg, false); |
995 | put_page(pages[i]); |
996 | } |
997 | kfree(pages); |
998 | goto out; |
999 | } |
1000 | |
1001 | int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) |
1002 | { |
1003 | struct vm_area_struct *vma = fe->vma; |
1004 | struct page *page = NULL, *new_page; |
1005 | struct mem_cgroup *memcg; |
1006 | unsigned long haddr = fe->address & HPAGE_PMD_MASK; |
1007 | unsigned long mmun_start; /* For mmu_notifiers */ |
1008 | unsigned long mmun_end; /* For mmu_notifiers */ |
1009 | gfp_t huge_gfp; /* for allocation and charge */ |
1010 | int ret = 0; |
1011 | |
1012 | fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd); |
1013 | VM_BUG_ON_VMA(!vma->anon_vma, vma); |
1014 | if (is_huge_zero_pmd(orig_pmd)) |
1015 | goto alloc; |
1016 | spin_lock(fe->ptl); |
1017 | if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) |
1018 | goto out_unlock; |
1019 | |
1020 | page = pmd_page(orig_pmd); |
1021 | VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); |
1022 | /* |
1023 | 	 * We can only reuse the page if nobody else maps the huge page or |
1024 | 	 * any part of it. |
1025 | */ |
1026 | if (page_trans_huge_mapcount(page, NULL) == 1) { |
1027 | pmd_t entry; |
1028 | entry = pmd_mkyoung(orig_pmd); |
1029 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
1030 | if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1)) |
1031 | update_mmu_cache_pmd(vma, fe->address, fe->pmd); |
1032 | ret |= VM_FAULT_WRITE; |
1033 | goto out_unlock; |
1034 | } |
1035 | get_page(page); |
1036 | spin_unlock(fe->ptl); |
1037 | alloc: |
1038 | if (transparent_hugepage_enabled(vma) && |
1039 | !transparent_hugepage_debug_cow()) { |
1040 | huge_gfp = alloc_hugepage_direct_gfpmask(vma); |
1041 | new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); |
1042 | } else |
1043 | new_page = NULL; |
1044 | |
1045 | if (likely(new_page)) { |
1046 | prep_transhuge_page(new_page); |
1047 | } else { |
1048 | if (!page) { |
1049 | split_huge_pmd(vma, fe->pmd, fe->address); |
1050 | ret |= VM_FAULT_FALLBACK; |
1051 | } else { |
1052 | ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page); |
1053 | if (ret & VM_FAULT_OOM) { |
1054 | split_huge_pmd(vma, fe->pmd, fe->address); |
1055 | ret |= VM_FAULT_FALLBACK; |
1056 | } |
1057 | put_page(page); |
1058 | } |
1059 | count_vm_event(THP_FAULT_FALLBACK); |
1060 | goto out; |
1061 | } |
1062 | |
1063 | if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, |
1064 | huge_gfp | __GFP_NORETRY, &memcg, true))) { |
1065 | put_page(new_page); |
1066 | split_huge_pmd(vma, fe->pmd, fe->address); |
1067 | if (page) |
1068 | put_page(page); |
1069 | ret |= VM_FAULT_FALLBACK; |
1070 | count_vm_event(THP_FAULT_FALLBACK); |
1071 | goto out; |
1072 | } |
1073 | |
1074 | count_vm_event(THP_FAULT_ALLOC); |
1075 | |
1076 | if (!page) |
1077 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |
1078 | else |
1079 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
1080 | __SetPageUptodate(new_page); |
1081 | |
1082 | mmun_start = haddr; |
1083 | mmun_end = haddr + HPAGE_PMD_SIZE; |
1084 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); |
1085 | |
1086 | spin_lock(fe->ptl); |
1087 | if (page) |
1088 | put_page(page); |
1089 | if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) { |
1090 | spin_unlock(fe->ptl); |
1091 | mem_cgroup_cancel_charge(new_page, memcg, true); |
1092 | put_page(new_page); |
1093 | goto out_mn; |
1094 | } else { |
1095 | pmd_t entry; |
1096 | entry = mk_huge_pmd(new_page, vma->vm_page_prot); |
1097 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
1098 | pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); |
1099 | page_add_new_anon_rmap(new_page, vma, haddr, true); |
1100 | mem_cgroup_commit_charge(new_page, memcg, false, true); |
1101 | lru_cache_add_active_or_unevictable(new_page, vma); |
1102 | set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); |
1103 | update_mmu_cache_pmd(vma, fe->address, fe->pmd); |
1104 | if (!page) { |
1105 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
1106 | } else { |
1107 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1108 | page_remove_rmap(page, true); |
1109 | put_page(page); |
1110 | } |
1111 | ret |= VM_FAULT_WRITE; |
1112 | } |
1113 | spin_unlock(fe->ptl); |
1114 | out_mn: |
1115 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
1116 | out: |
1117 | return ret; |
1118 | out_unlock: |
1119 | spin_unlock(fe->ptl); |
1120 | return ret; |
1121 | } |
1122 | |
1123 | /* |
1124 |  * FOLL_FORCE can write to even unwritable pmds, but only |
1125 | * after we've gone through a COW cycle and they are dirty. |
1126 | */ |
1127 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) |
1128 | { |
1129 | return pmd_write(pmd) || |
1130 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); |
1131 | } |
1132 | |
1133 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
1134 | unsigned long addr, |
1135 | pmd_t *pmd, |
1136 | unsigned int flags) |
1137 | { |
1138 | struct mm_struct *mm = vma->vm_mm; |
1139 | struct page *page = NULL; |
1140 | |
1141 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
1142 | |
1143 | if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) |
1144 | goto out; |
1145 | |
1146 | /* Avoid dumping huge zero page */ |
1147 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) |
1148 | return ERR_PTR(-EFAULT); |
1149 | |
1150 | /* Full NUMA hinting faults to serialise migration in fault paths */ |
1151 | if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) |
1152 | goto out; |
1153 | |
1154 | page = pmd_page(*pmd); |
1155 | VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); |
1156 | if (flags & FOLL_TOUCH) |
1157 | touch_pmd(vma, addr, pmd, flags); |
1158 | if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { |
1159 | /* |
1160 | * We don't mlock() pte-mapped THPs. This way we can avoid |
1161 | * leaking mlocked pages into non-VM_LOCKED VMAs. |
1162 | * |
1163 | * For anon THP: |
1164 | * |
1165 | * In most cases the pmd is the only mapping of the page as we |
1166 | * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for |
1167 | * writable private mappings in populate_vma_page_range(). |
1168 | * |
1169 | 	 * The only scenario in which the page is shared here is if we are |
1170 | 	 * mlocking a read-only mapping shared over fork(). We skip |
1171 | * mlocking such pages. |
1172 | * |
1173 | * For file THP: |
1174 | * |
1175 | * We can expect PageDoubleMap() to be stable under page lock: |
1176 | * for file pages we set it in page_add_file_rmap(), which |
1177 | * requires page to be locked. |
1178 | */ |
1179 | |
1180 | if (PageAnon(page) && compound_mapcount(page) != 1) |
1181 | goto skip_mlock; |
1182 | if (PageDoubleMap(page) || !page->mapping) |
1183 | goto skip_mlock; |
1184 | if (!trylock_page(page)) |
1185 | goto skip_mlock; |
1186 | lru_add_drain(); |
1187 | if (page->mapping && !PageDoubleMap(page)) |
1188 | mlock_vma_page(page); |
1189 | unlock_page(page); |
1190 | } |
1191 | skip_mlock: |
1192 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; |
1193 | VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); |
1194 | if (flags & FOLL_GET) |
1195 | get_page(page); |
1196 | |
1197 | out: |
1198 | return page; |
1199 | } |
1200 | |
1201 | /* NUMA hinting page fault entry point for trans huge pmds */ |
1202 | int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) |
1203 | { |
1204 | struct vm_area_struct *vma = fe->vma; |
1205 | struct anon_vma *anon_vma = NULL; |
1206 | struct page *page; |
1207 | unsigned long haddr = fe->address & HPAGE_PMD_MASK; |
1208 | int page_nid = -1, this_nid = numa_node_id(); |
1209 | int target_nid, last_cpupid = -1; |
1210 | bool page_locked; |
1211 | bool migrated = false; |
1212 | bool was_writable; |
1213 | int flags = 0; |
1214 | |
1215 | fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); |
1216 | if (unlikely(!pmd_same(pmd, *fe->pmd))) |
1217 | goto out_unlock; |
1218 | |
1219 | /* |
1220 | * If there are potential migrations, wait for completion and retry |
1221 | * without disrupting NUMA hinting information. Do not relock and |
1222 | * check_same as the page may no longer be mapped. |
1223 | */ |
1224 | if (unlikely(pmd_trans_migrating(*fe->pmd))) { |
1225 | page = pmd_page(*fe->pmd); |
1226 | if (!get_page_unless_zero(page)) |
1227 | goto out_unlock; |
1228 | spin_unlock(fe->ptl); |
1229 | wait_on_page_locked(page); |
1230 | put_page(page); |
1231 | goto out; |
1232 | } |
1233 | |
1234 | page = pmd_page(pmd); |
1235 | BUG_ON(is_huge_zero_page(page)); |
1236 | page_nid = page_to_nid(page); |
1237 | last_cpupid = page_cpupid_last(page); |
1238 | count_vm_numa_event(NUMA_HINT_FAULTS); |
1239 | if (page_nid == this_nid) { |
1240 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); |
1241 | flags |= TNF_FAULT_LOCAL; |
1242 | } |
1243 | |
1244 | /* See similar comment in do_numa_page for explanation */ |
1245 | if (!pmd_write(pmd)) |
1246 | flags |= TNF_NO_GROUP; |
1247 | |
1248 | /* |
1249 | * Acquire the page lock to serialise THP migrations but avoid dropping |
1250 | * page_table_lock if at all possible |
1251 | */ |
1252 | page_locked = trylock_page(page); |
1253 | target_nid = mpol_misplaced(page, vma, haddr); |
1254 | if (target_nid == -1) { |
1255 | /* If the page was locked, there are no parallel migrations */ |
1256 | if (page_locked) |
1257 | goto clear_pmdnuma; |
1258 | } |
1259 | |
1260 | /* Migration could have started since the pmd_trans_migrating check */ |
1261 | if (!page_locked) { |
1262 | page_nid = -1; |
1263 | if (!get_page_unless_zero(page)) |
1264 | goto out_unlock; |
1265 | spin_unlock(fe->ptl); |
1266 | wait_on_page_locked(page); |
1267 | put_page(page); |
1268 | goto out; |
1269 | } |
1270 | |
1271 | /* |
1272 | * Page is misplaced. Page lock serialises migrations. Acquire anon_vma |
1273 | 	 * to serialise splits |
1274 | */ |
1275 | get_page(page); |
1276 | spin_unlock(fe->ptl); |
1277 | anon_vma = page_lock_anon_vma_read(page); |
1278 | |
1279 | /* Confirm the PMD did not change while page_table_lock was released */ |
1280 | spin_lock(fe->ptl); |
1281 | if (unlikely(!pmd_same(pmd, *fe->pmd))) { |
1282 | unlock_page(page); |
1283 | put_page(page); |
1284 | page_nid = -1; |
1285 | goto out_unlock; |
1286 | } |
1287 | |
1288 | /* Bail if we fail to protect against THP splits for any reason */ |
1289 | if (unlikely(!anon_vma)) { |
1290 | put_page(page); |
1291 | page_nid = -1; |
1292 | goto clear_pmdnuma; |
1293 | } |
1294 | |
1295 | /* |
1296 | * Migrate the THP to the requested node, returns with page unlocked |
1297 | * and access rights restored. |
1298 | */ |
1299 | spin_unlock(fe->ptl); |
1300 | migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, |
1301 | fe->pmd, pmd, fe->address, page, target_nid); |
1302 | if (migrated) { |
1303 | flags |= TNF_MIGRATED; |
1304 | page_nid = target_nid; |
1305 | } else |
1306 | flags |= TNF_MIGRATE_FAIL; |
1307 | |
1308 | goto out; |
1309 | clear_pmdnuma: |
1310 | BUG_ON(!PageLocked(page)); |
1311 | was_writable = pmd_write(pmd); |
1312 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
1313 | pmd = pmd_mkyoung(pmd); |
1314 | if (was_writable) |
1315 | pmd = pmd_mkwrite(pmd); |
1316 | set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd); |
1317 | update_mmu_cache_pmd(vma, fe->address, fe->pmd); |
1318 | unlock_page(page); |
1319 | out_unlock: |
1320 | spin_unlock(fe->ptl); |
1321 | |
1322 | out: |
1323 | if (anon_vma) |
1324 | page_unlock_anon_vma_read(anon_vma); |
1325 | |
1326 | if (page_nid != -1) |
1327 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags); |
1328 | |
1329 | return 0; |
1330 | } |
1331 | |
1332 | /* |
1333 | * Return true if we do MADV_FREE successfully on entire pmd page. |
1334 | * Otherwise, return false. |
1335 | */ |
1336 | bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
1337 | pmd_t *pmd, unsigned long addr, unsigned long next) |
1338 | { |
1339 | spinlock_t *ptl; |
1340 | pmd_t orig_pmd; |
1341 | struct page *page; |
1342 | struct mm_struct *mm = tlb->mm; |
1343 | bool ret = false; |
1344 | |
1345 | ptl = pmd_trans_huge_lock(pmd, vma); |
1346 | if (!ptl) |
1347 | goto out_unlocked; |
1348 | |
1349 | orig_pmd = *pmd; |
1350 | if (is_huge_zero_pmd(orig_pmd)) |
1351 | goto out; |
1352 | |
1353 | page = pmd_page(orig_pmd); |
1354 | /* |
1355 | 	 * If other processes are mapping this page, we can't discard |
1356 | * the page unless they all do MADV_FREE so let's skip the page. |
1357 | */ |
1358 | if (page_mapcount(page) != 1) |
1359 | goto out; |
1360 | |
1361 | if (!trylock_page(page)) |
1362 | goto out; |
1363 | |
1364 | /* |
1365 | 	 * If the user wants to discard only part of the THP's pages, split it |
1366 | 	 * so MADV_FREE will deactivate only those pages. |
1367 | */ |
1368 | if (next - addr != HPAGE_PMD_SIZE) { |
1369 | get_page(page); |
1370 | spin_unlock(ptl); |
1371 | split_huge_page(page); |
1372 | unlock_page(page); |
1373 | put_page(page); |
1374 | goto out_unlocked; |
1375 | } |
1376 | |
1377 | if (PageDirty(page)) |
1378 | ClearPageDirty(page); |
1379 | unlock_page(page); |
1380 | |
1381 | if (PageActive(page)) |
1382 | deactivate_page(page); |
1383 | |
1384 | if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { |
1385 | pmdp_invalidate(vma, addr, pmd); |
1386 | orig_pmd = pmd_mkold(orig_pmd); |
1387 | orig_pmd = pmd_mkclean(orig_pmd); |
1388 | |
1389 | set_pmd_at(mm, addr, pmd, orig_pmd); |
1390 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
1391 | } |
1392 | ret = true; |
1393 | out: |
1394 | spin_unlock(ptl); |
1395 | out_unlocked: |
1396 | return ret; |
1397 | } |
1398 | |
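/*
 * Tear down one huge pmd for munmap/exit/truncate: clear the entry, drop the
 * mapping's rmap reference, and release the deposited page table and adjust
 * RSS counters as appropriate for DAX, the huge zero page, anonymous THPs or
 * file THPs.
 */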
1399 | int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
1400 | pmd_t *pmd, unsigned long addr) |
1401 | { |
1402 | pmd_t orig_pmd; |
1403 | spinlock_t *ptl; |
1404 | |
1405 | ptl = __pmd_trans_huge_lock(pmd, vma); |
1406 | if (!ptl) |
1407 | return 0; |
1408 | /* |
1409 | * For architectures like ppc64 we look at deposited pgtable |
1410 | * when calling pmdp_huge_get_and_clear. So do the |
1411 | * pgtable_trans_huge_withdraw after finishing pmdp related |
1412 | * operations. |
1413 | */ |
1414 | orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, |
1415 | tlb->fullmm); |
1416 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
1417 | if (vma_is_dax(vma)) { |
1418 | spin_unlock(ptl); |
1419 | if (is_huge_zero_pmd(orig_pmd)) |
1420 | tlb_remove_page(tlb, pmd_page(orig_pmd)); |
1421 | } else if (is_huge_zero_pmd(orig_pmd)) { |
1422 | pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); |
1423 | atomic_long_dec(&tlb->mm->nr_ptes); |
1424 | spin_unlock(ptl); |
1425 | tlb_remove_page(tlb, pmd_page(orig_pmd)); |
1426 | } else { |
1427 | struct page *page = pmd_page(orig_pmd); |
1428 | page_remove_rmap(page, true); |
1429 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
1430 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1431 | if (PageAnon(page)) { |
1432 | pgtable_t pgtable; |
1433 | pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); |
1434 | pte_free(tlb->mm, pgtable); |
1435 | atomic_long_dec(&tlb->mm->nr_ptes); |
1436 | add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); |
1437 | } else { |
1438 | add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); |
1439 | } |
1440 | spin_unlock(ptl); |
1441 | tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); |
1442 | } |
1443 | return 1; |
1444 | } |
1445 | |
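/*
 * mremap() support: move a huge pmd from old_addr to new_addr without
 * splitting it, relocating the deposited page table when the architecture
 * requires it and flushing the TLB if a present entry was moved.
 */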
1446 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
1447 | unsigned long new_addr, unsigned long old_end, |
1448 | pmd_t *old_pmd, pmd_t *new_pmd) |
1449 | { |
1450 | spinlock_t *old_ptl, *new_ptl; |
1451 | pmd_t pmd; |
1452 | struct mm_struct *mm = vma->vm_mm; |
1453 | bool force_flush = false; |
1454 | |
1455 | if ((old_addr & ~HPAGE_PMD_MASK) || |
1456 | (new_addr & ~HPAGE_PMD_MASK) || |
1457 | old_end - old_addr < HPAGE_PMD_SIZE) |
1458 | return false; |
1459 | |
1460 | /* |
1461 | * The destination pmd shouldn't be established, free_pgtables() |
1462 | 	 * should have released it. |
1463 | */ |
1464 | if (WARN_ON(!pmd_none(*new_pmd))) { |
1465 | VM_BUG_ON(pmd_trans_huge(*new_pmd)); |
1466 | return false; |
1467 | } |
1468 | |
1469 | /* |
1470 | * We don't have to worry about the ordering of src and dst |
1471 | * ptlocks because exclusive mmap_sem prevents deadlock. |
1472 | */ |
1473 | old_ptl = __pmd_trans_huge_lock(old_pmd, vma); |
1474 | if (old_ptl) { |
1475 | new_ptl = pmd_lockptr(mm, new_pmd); |
1476 | if (new_ptl != old_ptl) |
1477 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
1478 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
1479 | if (pmd_present(pmd)) |
1480 | force_flush = true; |
1481 | VM_BUG_ON(!pmd_none(*new_pmd)); |
1482 | |
1483 | if (pmd_move_must_withdraw(new_ptl, old_ptl) && |
1484 | vma_is_anonymous(vma)) { |
1485 | pgtable_t pgtable; |
1486 | pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); |
1487 | pgtable_trans_huge_deposit(mm, new_pmd, pgtable); |
1488 | } |
1489 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); |
1490 | if (force_flush) |
1491 | flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); |
1492 | if (new_ptl != old_ptl) |
1493 | spin_unlock(new_ptl); |
1494 | spin_unlock(old_ptl); |
1495 | return true; |
1496 | } |
1497 | return false; |
1498 | } |
1499 | |
1500 | /* |
1501 | * Returns |
1502 | * - 0 if PMD could not be locked |
1503 |  *  - 1 if PMD was locked but protections were unchanged and no TLB flush is necessary |
1504 |  *  - HPAGE_PMD_NR if protections were changed and a TLB flush is necessary |
1505 | */ |
1506 | int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
1507 | unsigned long addr, pgprot_t newprot, int prot_numa) |
1508 | { |
1509 | struct mm_struct *mm = vma->vm_mm; |
1510 | spinlock_t *ptl; |
1511 | pmd_t entry; |
1512 | bool preserve_write; |
1513 | int ret; |
1514 | |
1515 | ptl = __pmd_trans_huge_lock(pmd, vma); |
1516 | if (!ptl) |
1517 | return 0; |
1518 | |
1519 | preserve_write = prot_numa && pmd_write(*pmd); |
1520 | ret = 1; |
1521 | |
1522 | /* |
1523 | * Avoid trapping faults against the zero page. The read-only |
1524 | * data is likely to be read-cached on the local CPU and |
1525 | * local/remote hits to the zero page are not interesting. |
1526 | */ |
1527 | if (prot_numa && is_huge_zero_pmd(*pmd)) |
1528 | goto unlock; |
1529 | |
1530 | if (prot_numa && pmd_protnone(*pmd)) |
1531 | goto unlock; |
1532 | |
1533 | /* |
1534 | 	 * In the prot_numa case, we are under down_read(mmap_sem). It's critical |
1535 | * to not clear pmd intermittently to avoid race with MADV_DONTNEED |
1536 | * which is also under down_read(mmap_sem): |
1537 | * |
1538 | * CPU0: CPU1: |
1539 | * change_huge_pmd(prot_numa=1) |
1540 | * pmdp_huge_get_and_clear_notify() |
1541 | * madvise_dontneed() |
1542 | * zap_pmd_range() |
1543 | * pmd_trans_huge(*pmd) == 0 (without ptl) |
1544 | * // skip the pmd |
1545 | * set_pmd_at(); |
1546 | * // pmd is re-established |
1547 | * |
1548 | * The race makes MADV_DONTNEED miss the huge pmd and don't clear it |
1549 | * which may break userspace. |
1550 | * |
1551 | * pmdp_invalidate() is required to make sure we don't miss |
1552 | * dirty/young flags set by hardware. |
1553 | */ |
1554 | entry = *pmd; |
1555 | pmdp_invalidate(vma, addr, pmd); |
1556 | |
1557 | /* |
1558 | * Recover dirty/young flags. It relies on pmdp_invalidate to not |
1559 | * corrupt them. |
1560 | */ |
1561 | if (pmd_dirty(*pmd)) |
1562 | entry = pmd_mkdirty(entry); |
1563 | if (pmd_young(*pmd)) |
1564 | entry = pmd_mkyoung(entry); |
1565 | |
1566 | entry = pmd_modify(entry, newprot); |
1567 | if (preserve_write) |
1568 | entry = pmd_mkwrite(entry); |
1569 | ret = HPAGE_PMD_NR; |
1570 | set_pmd_at(mm, addr, pmd, entry); |
1571 | BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); |
1572 | unlock: |
1573 | spin_unlock(ptl); |
1574 | return ret; |
1575 | } |
1576 | |
1577 | /* |
1578 | * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. |
1579 | * |
1580 | * Note that if it returns page table lock pointer, this routine returns without |
1581 | * unlocking page table lock. So callers must unlock it. |
1582 | */ |
1583 | spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) |
1584 | { |
1585 | spinlock_t *ptl; |
1586 | ptl = pmd_lock(vma->vm_mm, pmd); |
1587 | if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) |
1588 | return ptl; |
1589 | spin_unlock(ptl); |
1590 | return NULL; |
1591 | } |
1592 | |
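/*
 * Replace a huge zero pmd with a page table in which every pte points at the
 * 4K zero page, using the previously deposited page table.
 */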
1593 | static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, |
1594 | unsigned long haddr, pmd_t *pmd) |
1595 | { |
1596 | struct mm_struct *mm = vma->vm_mm; |
1597 | pgtable_t pgtable; |
1598 | pmd_t _pmd; |
1599 | int i; |
1600 | |
1601 | /* leave pmd empty until pte is filled */ |
1602 | pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
1603 | |
1604 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
1605 | pmd_populate(mm, &_pmd, pgtable); |
1606 | |
1607 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
1608 | pte_t *pte, entry; |
1609 | entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); |
1610 | entry = pte_mkspecial(entry); |
1611 | pte = pte_offset_map(&_pmd, haddr); |
1612 | VM_BUG_ON(!pte_none(*pte)); |
1613 | set_pte_at(mm, haddr, pte, entry); |
1614 | pte_unmap(pte); |
1615 | } |
1616 | smp_wmb(); /* make pte visible before pmd */ |
1617 | pmd_populate(mm, pmd, pgtable); |
1618 | } |
1619 | |
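/*
 * Core of the pmd split, called with the pmd lock held: withdraw the
 * deposited page table and remap the range with HPAGE_PMD_NR ptes (or
 * migration entries when @freeze is set), transferring dirty/young/soft-dirty
 * state and fixing up the subpage mapcounts.
 */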
1620 | static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, |
1621 | unsigned long haddr, bool freeze) |
1622 | { |
1623 | struct mm_struct *mm = vma->vm_mm; |
1624 | struct page *page; |
1625 | pgtable_t pgtable; |
1626 | pmd_t _pmd; |
1627 | bool young, write, dirty, soft_dirty; |
1628 | unsigned long addr; |
1629 | int i; |
1630 | |
1631 | VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); |
1632 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
1633 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); |
1634 | VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)); |
1635 | |
1636 | count_vm_event(THP_SPLIT_PMD); |
1637 | |
1638 | if (!vma_is_anonymous(vma)) { |
1639 | _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
1640 | if (vma_is_dax(vma)) |
1641 | return; |
1642 | page = pmd_page(_pmd); |
1643 | if (!PageDirty(page) && pmd_dirty(_pmd)) |
1644 | set_page_dirty(page); |
1645 | if (!PageReferenced(page) && pmd_young(_pmd)) |
1646 | SetPageReferenced(page); |
1647 | page_remove_rmap(page, true); |
1648 | put_page(page); |
1649 | add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR); |
1650 | return; |
1651 | } else if (is_huge_zero_pmd(*pmd)) { |
1652 | return __split_huge_zero_page_pmd(vma, haddr, pmd); |
1653 | } |
1654 | |
1655 | page = pmd_page(*pmd); |
1656 | VM_BUG_ON_PAGE(!page_count(page), page); |
1657 | page_ref_add(page, HPAGE_PMD_NR - 1); |
1658 | write = pmd_write(*pmd); |
1659 | young = pmd_young(*pmd); |
1660 | dirty = pmd_dirty(*pmd); |
1661 | soft_dirty = pmd_soft_dirty(*pmd); |
1662 | |
1663 | pmdp_huge_split_prepare(vma, haddr, pmd); |
1664 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
1665 | pmd_populate(mm, &_pmd, pgtable); |
1666 | |
1667 | for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { |
1668 | pte_t entry, *pte; |
1669 | /* |
1670 | * Note that NUMA hinting access restrictions are not |
1671 | * transferred to avoid any possibility of altering |
1672 | * permissions across VMAs. |
1673 | */ |
1674 | if (freeze) { |
1675 | swp_entry_t swp_entry; |
1676 | swp_entry = make_migration_entry(page + i, write); |
1677 | entry = swp_entry_to_pte(swp_entry); |
1678 | if (soft_dirty) |
1679 | entry = pte_swp_mksoft_dirty(entry); |
1680 | } else { |
1681 | entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); |
1682 | entry = maybe_mkwrite(entry, vma); |
1683 | if (!write) |
1684 | entry = pte_wrprotect(entry); |
1685 | if (!young) |
1686 | entry = pte_mkold(entry); |
1687 | if (soft_dirty) |
1688 | entry = pte_mksoft_dirty(entry); |
1689 | } |
1690 | if (dirty) |
1691 | SetPageDirty(page + i); |
1692 | pte = pte_offset_map(&_pmd, addr); |
1693 | BUG_ON(!pte_none(*pte)); |
1694 | set_pte_at(mm, addr, pte, entry); |
1695 | atomic_inc(&page[i]._mapcount); |
1696 | pte_unmap(pte); |
1697 | } |
1698 | |
1699 | /* |
1700 | * Set PG_double_map before dropping compound_mapcount to avoid |
1701 | * false-negative page_mapped(). |
1702 | */ |
1703 | if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { |
1704 | for (i = 0; i < HPAGE_PMD_NR; i++) |
1705 | atomic_inc(&page[i]._mapcount); |
1706 | } |
1707 | |
1708 | if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { |
1709 | /* Last compound_mapcount is gone. */ |
1710 | __dec_node_page_state(page, NR_ANON_THPS); |
1711 | if (TestClearPageDoubleMap(page)) { |
1712 | /* No need in mapcount reference anymore */ |
1713 | for (i = 0; i < HPAGE_PMD_NR; i++) |
1714 | atomic_dec(&page[i]._mapcount); |
1715 | } |
1716 | } |
1717 | |
1718 | smp_wmb(); /* make pte visible before pmd */ |
1719 | /* |
1720 | * Up to this point the pmd is present and huge and userland has full
1721 | * access to the hugepage during the split (which happens in
1722 | * place). If we overwrite the pmd with the not-huge version pointing
1723 | * to the pte here (which of course we could if all CPUs were bug
1724 | * free), userland could trigger a small page size TLB miss on the
1725 | * small sized TLB while the hugepage TLB entry is still established in
1726 | * the huge TLB. Some CPUs don't like that.
1727 | * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
1728 | * 383 on page 93. Intel should be safe, but it also warns that it's
1729 | * only safe if the permission and cache attributes of the two entries
1730 | * loaded in the TLB are identical (which should be the case here).
1731 | * But it is generally safer to never allow small and huge TLB entries
1732 | * for the same virtual address to be loaded simultaneously. So instead
1733 | * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
1734 | * current pmd notpresent (atomically, because pmd_trans_huge must
1735 | * remain set on the pmd at all times until the split is complete for
1736 | * this pmd), then we flush the SMP TLB and finally we write the
1737 | * non-huge version of the pmd entry with
1738 | * pmd_populate.
1739 | */ |
1740 | pmdp_invalidate(vma, haddr, pmd); |
1741 | pmd_populate(mm, pmd, pgtable); |
1742 | |
1743 | if (freeze) { |
1744 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
1745 | page_remove_rmap(page + i, false); |
1746 | put_page(page + i); |
1747 | } |
1748 | } |
1749 | } |
1750 | |
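/*
 * Split the huge pmd covering @address, if any. When @page is non-NULL it
 * is used to verify that the pmd still maps the expected page before the
 * split (needed when @freeze asks for migration entries to be installed).
 */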
1751 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
1752 | unsigned long address, bool freeze, struct page *page) |
1753 | { |
1754 | spinlock_t *ptl; |
1755 | struct mm_struct *mm = vma->vm_mm; |
1756 | unsigned long haddr = address & HPAGE_PMD_MASK; |
1757 | |
1758 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); |
1759 | ptl = pmd_lock(mm, pmd); |
1760 | |
1761 | /* |
1762 | * If the caller asks us to set up migration entries, we need a page to
1763 | * check the pmd against. Otherwise we can end up replacing the wrong page.
1764 | */ |
1765 | VM_BUG_ON(freeze && !page); |
1766 | if (page && page != pmd_page(*pmd)) |
1767 | goto out; |
1768 | |
1769 | if (pmd_trans_huge(*pmd)) { |
1770 | page = pmd_page(*pmd); |
1771 | if (PageMlocked(page)) |
1772 | clear_page_mlock(page); |
1773 | } else if (!pmd_devmap(*pmd)) |
1774 | goto out; |
1775 | __split_huge_pmd_locked(vma, pmd, haddr, freeze); |
1776 | out: |
1777 | spin_unlock(ptl); |
1778 | mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); |
1779 | } |
1780 | |
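/*
 * Walk the page tables down to the pmd that covers @address and split it;
 * bail out early if the pgd or pud is not present.
 */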
1781 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
1782 | bool freeze, struct page *page) |
1783 | { |
1784 | pgd_t *pgd; |
1785 | pud_t *pud; |
1786 | pmd_t *pmd; |
1787 | |
1788 | pgd = pgd_offset(vma->vm_mm, address); |
1789 | if (!pgd_present(*pgd)) |
1790 | return; |
1791 | |
1792 | pud = pud_offset(pgd, address); |
1793 | if (!pud_present(*pud)) |
1794 | return; |
1795 | |
1796 | pmd = pmd_offset(pud, address); |
1797 | |
1798 | __split_huge_pmd(vma, pmd, address, freeze, page); |
1799 | } |
1800 | |
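/*
 * Called when a vma's boundaries are adjusted: split any huge pmd that the
 * new start, end or next->vm_start would otherwise land in the middle of.
 */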
1801 | void vma_adjust_trans_huge(struct vm_area_struct *vma, |
1802 | unsigned long start, |
1803 | unsigned long end, |
1804 | long adjust_next) |
1805 | { |
1806 | /* |
1807 | * If the new start address isn't hpage aligned and it could |
1808 | * previously contain a hugepage: check if we need to split
1809 | * a huge pmd.
1810 | */ |
1811 | if (start & ~HPAGE_PMD_MASK && |
1812 | (start & HPAGE_PMD_MASK) >= vma->vm_start && |
1813 | (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) |
1814 | split_huge_pmd_address(vma, start, false, NULL); |
1815 | |
1816 | /* |
1817 | * If the new end address isn't hpage aligned and it could |
1818 | * previously contain a hugepage: check if we need to split
1819 | * a huge pmd.
1820 | */ |
1821 | if (end & ~HPAGE_PMD_MASK && |
1822 | (end & HPAGE_PMD_MASK) >= vma->vm_start && |
1823 | (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) |
1824 | split_huge_pmd_address(vma, end, false, NULL); |
1825 | |
1826 | /* |
1827 | * If we're also updating the vma->vm_next->vm_start, if the new |
1828 | * vm_next->vm_start isn't page aligned and it could previously |
1829 | * contain a hugepage: check if we need to split a huge pmd.
1830 | */ |
1831 | if (adjust_next > 0) { |
1832 | struct vm_area_struct *next = vma->vm_next; |
1833 | unsigned long nstart = next->vm_start; |
1834 | nstart += adjust_next << PAGE_SHIFT; |
1835 | if (nstart & ~HPAGE_PMD_MASK && |
1836 | (nstart & HPAGE_PMD_MASK) >= next->vm_start && |
1837 | (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) |
1838 | split_huge_pmd_address(next, nstart, false, NULL); |
1839 | } |
1840 | } |
1841 | |
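/*
 * Unmap all subpages of a THP before splitting it. Anonymous pages are
 * unmapped to migration entries (TTU_MIGRATION) so remap_page() can
 * restore them afterwards; the first call also splits any remaining huge
 * pmd (TTU_SPLIT_HUGE_PMD). The caller already holds the rmap lock
 * (TTU_RMAP_LOCKED).
 */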
1842 | static void unmap_page(struct page *page) |
1843 | { |
1844 | enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | |
1845 | TTU_RMAP_LOCKED; |
1846 | int i, ret; |
1847 | |
1848 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1849 | |
1850 | if (PageAnon(page)) |
1851 | ttu_flags |= TTU_MIGRATION; |
1852 | |
1853 | /* We only need TTU_SPLIT_HUGE_PMD once */ |
1854 | ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); |
1855 | for (i = 1; !ret && i < HPAGE_PMD_NR; i++) { |
1856 | /* Cut short if the page is unmapped */ |
1857 | if (page_count(page) == 1) |
1858 | return; |
1859 | |
1860 | ret = try_to_unmap(page + i, ttu_flags); |
1861 | } |
1862 | VM_BUG_ON_PAGE(ret, page + i - 1); |
1863 | } |
1864 | |
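/* Undo unmap_page(): turn the migration entries back into regular ptes. */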
1865 | static void remap_page(struct page *page) |
1866 | { |
1867 | int i; |
1868 | |
1869 | for (i = 0; i < HPAGE_PMD_NR; i++) |
1870 | remove_migration_ptes(page + i, page + i, true); |
1871 | } |
1872 | |
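/*
 * Turn one tail page into an independent page: copy the relevant flags
 * from the head, restore ->mapping and ->index, clear the compound
 * linkage, unfreeze the tail's refcount and put it on the LRU (or @list).
 */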
1873 | static void __split_huge_page_tail(struct page *head, int tail, |
1874 | struct lruvec *lruvec, struct list_head *list) |
1875 | { |
1876 | struct page *page_tail = head + tail; |
1877 | |
1878 | VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); |
1879 | |
1880 | /* |
1881 | * Clone page flags before unfreezing refcount. |
1882 | * |
1883 | * A successful get_page_unless_zero() might be followed by flags changes,
1884 | * for example lock_page() which sets PG_waiters.
1885 | */ |
1886 | page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; |
1887 | page_tail->flags |= (head->flags & |
1888 | ((1L << PG_referenced) | |
1889 | (1L << PG_swapbacked) | |
1890 | (1L << PG_mlocked) | |
1891 | (1L << PG_uptodate) | |
1892 | (1L << PG_active) | |
1893 | (1L << PG_workingset) | |
1894 | (1L << PG_locked) | |
1895 | (1L << PG_unevictable) | |
1896 | (1L << PG_dirty))); |
1897 | |
1898 | /* ->mapping in first tail page is compound_mapcount */ |
1899 | VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, |
1900 | page_tail); |
1901 | page_tail->mapping = head->mapping; |
1902 | page_tail->index = head->index + tail; |
1903 | |
1904 | /* Page flags must be visible before we make the page non-compound. */ |
1905 | smp_wmb(); |
1906 | |
1907 | /* |
1908 | * Clear PageTail before unfreezing page refcount. |
1909 | * |
1910 | * A successful get_page_unless_zero() might be followed by put_page(),
1911 | * which needs the correct compound_head().
1912 | */ |
1913 | clear_compound_head(page_tail); |
1914 | |
1915 | /* Finally unfreeze refcount. Additional reference from page cache. */ |
1916 | page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || |
1917 | PageSwapCache(head))); |
1918 | |
1919 | if (page_is_young(head)) |
1920 | set_page_young(page_tail); |
1921 | if (page_is_idle(head)) |
1922 | set_page_idle(page_tail); |
1923 | |
1924 | page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); |
1925 | lru_add_page_tail(head, page_tail, lruvec, list); |
1926 | } |
1927 | |
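/*
 * Tear the compound page apart once all subpages have been unmapped. Tail
 * pages beyond i_size (@end) are dropped from the page cache. Called with
 * the lru lock held (irqs disabled, @flags) and, for file pages, with the
 * mapping's tree_lock held; both are released here.
 */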
1928 | static void __split_huge_page(struct page *page, struct list_head *list, |
1929 | pgoff_t end, unsigned long flags) |
1930 | { |
1931 | struct page *head = compound_head(page); |
1932 | struct zone *zone = page_zone(head); |
1933 | struct lruvec *lruvec; |
1934 | int i; |
1935 | |
1936 | lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); |
1937 | |
1938 | /* complete memcg work before adding pages to the LRU */
1939 | mem_cgroup_split_huge_fixup(head); |
1940 | |
1941 | for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { |
1942 | __split_huge_page_tail(head, i, lruvec, list); |
1943 | /* Some pages can be beyond i_size: drop them from page cache */ |
1944 | if (head[i].index >= end) { |
1945 | __ClearPageDirty(head + i); |
1946 | __delete_from_page_cache(head + i, NULL); |
1947 | if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) |
1948 | shmem_uncharge(head->mapping->host, 1); |
1949 | put_page(head + i); |
1950 | } |
1951 | } |
1952 | |
1953 | ClearPageCompound(head); |
1954 | /* See comment in __split_huge_page_tail() */ |
1955 | if (PageAnon(head)) { |
1956 | page_ref_inc(head); |
1957 | } else { |
1958 | /* Additional pin to radix tree */ |
1959 | page_ref_add(head, 2); |
1960 | spin_unlock(&head->mapping->tree_lock); |
1961 | } |
1962 | |
1963 | spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); |
1964 | |
1965 | remap_page(head); |
1966 | |
1967 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
1968 | struct page *subpage = head + i; |
1969 | if (subpage == page) |
1970 | continue; |
1971 | unlock_page(subpage); |
1972 | |
1973 | /* |
1974 | * Subpages may be freed if there wasn't any mapping left,
1975 | * e.g. if add_to_swap() is running on an LRU page that
1976 | * had its mapping zapped. Freeing these pages requires
1977 | * taking the lru_lock, so we do the put_page of the
1978 | * tail pages after the split is complete.
1979 | */ |
1980 | put_page(subpage); |
1981 | } |
1982 | } |
1983 | |
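/*
 * Return the total number of mappings of a compound page: the pmd
 * (compound) mappings plus the pte mappings of every subpage, with
 * PageDoubleMap() double accounting taken into account.
 */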
1984 | int total_mapcount(struct page *page) |
1985 | { |
1986 | int i, compound, ret; |
1987 | |
1988 | VM_BUG_ON_PAGE(PageTail(page), page); |
1989 | |
1990 | if (likely(!PageCompound(page))) |
1991 | return atomic_read(&page->_mapcount) + 1; |
1992 | |
1993 | compound = compound_mapcount(page); |
1994 | if (PageHuge(page)) |
1995 | return compound; |
1996 | ret = compound; |
1997 | for (i = 0; i < HPAGE_PMD_NR; i++) |
1998 | ret += atomic_read(&page[i]._mapcount) + 1; |
1999 | /* File pages have compound_mapcount included in _mapcount */
2000 | if (!PageAnon(page)) |
2001 | return ret - compound * HPAGE_PMD_NR; |
2002 | if (PageDoubleMap(page)) |
2003 | ret -= HPAGE_PMD_NR; |
2004 | return ret; |
2005 | } |
2006 | |
2007 | /* |
2008 | * This calculates accurately how many mappings a transparent hugepage |
2009 | * has (unlike page_mapcount() which isn't fully accurate). This full |
2010 | * accuracy is primarily needed to know if copy-on-write faults can |
2011 | * reuse the page and change the mapping to read-write instead of |
2012 | * copying it. At the same time this returns the total_mapcount too.
2013 | * |
2014 | * The function returns the highest mapcount any one of the subpages |
2015 | * has. If the return value is one, even if different processes are |
2016 | * mapping different subpages of the transparent hugepage, they can |
2017 | * all reuse it, because each process is reusing a different subpage. |
2018 | * |
2019 | * The total_mapcount is instead counting all virtual mappings of the |
2020 | * subpages. If the total_mapcount is equal to "one", it tells the
2021 | * caller that all mappings belong to the same "mm", and in turn the
2022 | * anon_vma of the transparent hugepage can become the local
2023 | * vma->anon_vma, as no other process may be mapping any of the subpages.
2024 | * |
2025 | * It would be more accurate to replace page_mapcount() with |
2026 | * page_trans_huge_mapcount(), however we only use |
2027 | * page_trans_huge_mapcount() in the copy-on-write faults where we |
2028 | * need full accuracy to avoid breaking page pinning, because |
2029 | * page_trans_huge_mapcount() is slower than page_mapcount(). |
2030 | */ |
2031 | int page_trans_huge_mapcount(struct page *page, int *total_mapcount) |
2032 | { |
2033 | int i, ret, _total_mapcount, mapcount; |
2034 | |
2035 | /* hugetlbfs shouldn't call it */ |
2036 | VM_BUG_ON_PAGE(PageHuge(page), page); |
2037 | |
2038 | if (likely(!PageTransCompound(page))) { |
2039 | mapcount = atomic_read(&page->_mapcount) + 1; |
2040 | if (total_mapcount) |
2041 | *total_mapcount = mapcount; |
2042 | return mapcount; |
2043 | } |
2044 | |
2045 | page = compound_head(page); |
2046 | |
2047 | _total_mapcount = ret = 0; |
2048 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
2049 | mapcount = atomic_read(&page[i]._mapcount) + 1; |
2050 | ret = max(ret, mapcount); |
2051 | _total_mapcount += mapcount; |
2052 | } |
2053 | if (PageDoubleMap(page)) { |
2054 | ret -= 1; |
2055 | _total_mapcount -= HPAGE_PMD_NR; |
2056 | } |
2057 | mapcount = compound_mapcount(page); |
2058 | ret += mapcount; |
2059 | _total_mapcount += mapcount; |
2060 | if (total_mapcount) |
2061 | *total_mapcount = _total_mapcount; |
2062 | return ret; |
2063 | } |
2064 | |
2065 | /* |
2066 | * This function splits huge page into normal pages. @page can point to any |
2067 | * subpage of huge page to split. Split doesn't change the position of @page. |
2068 | * |
2069 | * Only the caller may hold a pin on the @page, otherwise the split fails with -EBUSY.
2070 | * The huge page must be locked. |
2071 | * |
2072 | * If @list is null, tail pages will be added to LRU list, otherwise, to @list. |
2073 | * |
2074 | * Both head page and tail pages will inherit mapping, flags, and so on from |
2075 | * the hugepage. |
2076 | * |
2077 | * The GUP pin and PG_locked are transferred to @page. The rest of the
2078 | * subpages can be freed if they are not mapped.
2079 | * |
2080 | * Returns 0 if the hugepage is split successfully. |
2081 | * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under |
2082 | * us. |
2083 | */ |
2084 | int split_huge_page_to_list(struct page *page, struct list_head *list) |
2085 | { |
2086 | struct page *head = compound_head(page); |
2087 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); |
2088 | struct anon_vma *anon_vma = NULL; |
2089 | struct address_space *mapping = NULL; |
2090 | int count, mapcount, extra_pins, ret; |
2091 | bool mlocked; |
2092 | unsigned long flags; |
2093 | pgoff_t end; |
2094 | |
2095 | VM_BUG_ON_PAGE(is_huge_zero_page(page), page); |
2096 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
2097 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
2098 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
2099 | |
2100 | if (PageAnon(head)) { |
2101 | /* |
2102 | * The caller does not necessarily hold an mmap_sem that would |
2103 | * prevent the anon_vma disappearing, so we first take a
2104 | * reference to it and then lock the anon_vma for write. This |
2105 | * is similar to page_lock_anon_vma_read except the write lock |
2106 | * is taken to serialise against parallel split or collapse |
2107 | * operations. |
2108 | */ |
2109 | anon_vma = page_get_anon_vma(head); |
2110 | if (!anon_vma) { |
2111 | ret = -EBUSY; |
2112 | goto out; |
2113 | } |
2114 | extra_pins = 0; |
2115 | end = -1; |
2116 | mapping = NULL; |
2117 | anon_vma_lock_write(anon_vma); |
2118 | } else { |
2119 | mapping = head->mapping; |
2120 | |
2121 | /* Truncated ? */ |
2122 | if (!mapping) { |
2123 | ret = -EBUSY; |
2124 | goto out; |
2125 | } |
2126 | |
2127 | /* Additional pins from the radix tree */
2128 | extra_pins = HPAGE_PMD_NR; |
2129 | anon_vma = NULL; |
2130 | i_mmap_lock_read(mapping); |
2131 | |
2132 | /* |
2133 | * __split_huge_page() may need to trim off pages beyond EOF:
2134 | * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, |
2135 | * which cannot be nested inside the page tree lock. So note |
2136 | * end now: i_size itself may be changed at any moment, but |
2137 | * head page lock is good enough to serialize the trimming. |
2138 | */ |
2139 | end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); |
2140 | } |
2141 | |
2142 | /* |
2143 | * Racy check whether we can split the page, done before unmap_page()
2144 | * splits the PMDs.
2145 | */ |
2146 | if (total_mapcount(head) != page_count(head) - extra_pins - 1) { |
2147 | ret = -EBUSY; |
2148 | goto out_unlock; |
2149 | } |
2150 | |
2151 | mlocked = PageMlocked(page); |
2152 | unmap_page(head); |
2153 | VM_BUG_ON_PAGE(compound_mapcount(head), head); |
2154 | |
2155 | /* Make sure the page is not on a per-CPU pagevec, as that takes a pin */
2156 | if (mlocked) |
2157 | lru_add_drain(); |
2158 | |
2159 | /* prevent PageLRU from going away from under us, and freeze lru stats */
2160 | spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags); |
2161 | |
2162 | if (mapping) { |
2163 | void **pslot; |
2164 | |
2165 | spin_lock(&mapping->tree_lock); |
2166 | pslot = radix_tree_lookup_slot(&mapping->page_tree, |
2167 | page_index(head)); |
2168 | /* |
2169 | * Check if the head page is present in the radix tree.
2170 | * We assume all tail pages are present too, if the head is there.
2171 | */ |
2172 | if (radix_tree_deref_slot_protected(pslot, |
2173 | &mapping->tree_lock) != head) |
2174 | goto fail; |
2175 | } |
2176 | |
2177 | /* Prevent deferred_split_scan() from touching ->_refcount */
2178 | spin_lock(&pgdata->split_queue_lock); |
2179 | count = page_count(head); |
2180 | mapcount = total_mapcount(head); |
2181 | if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { |
2182 | if (!list_empty(page_deferred_list(head))) { |
2183 | pgdata->split_queue_len--; |
2184 | list_del(page_deferred_list(head)); |
2185 | } |
2186 | if (mapping) |
2187 | __dec_node_page_state(page, NR_SHMEM_THPS); |
2188 | spin_unlock(&pgdata->split_queue_lock); |
2189 | __split_huge_page(page, list, end, flags); |
2190 | ret = 0; |
2191 | } else { |
2192 | if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { |
2193 | pr_alert("total_mapcount: %u, page_count(): %u\n", |
2194 | mapcount, count); |
2195 | if (PageTail(page)) |
2196 | dump_page(head, NULL); |
2197 | dump_page(page, "total_mapcount(head) > 0"); |
2198 | BUG(); |
2199 | } |
2200 | spin_unlock(&pgdata->split_queue_lock); |
2201 | fail: if (mapping) |
2202 | spin_unlock(&mapping->tree_lock); |
2203 | spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); |
2204 | remap_page(head); |
2205 | ret = -EBUSY; |
2206 | } |
2207 | |
2208 | out_unlock: |
2209 | if (anon_vma) { |
2210 | anon_vma_unlock_write(anon_vma); |
2211 | put_anon_vma(anon_vma); |
2212 | } |
2213 | if (mapping) |
2214 | i_mmap_unlock_read(mapping); |
2215 | out: |
2216 | count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); |
2217 | return ret; |
2218 | } |
2219 | |
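/*
 * Free a THP, taking it off the deferred split queue first if it is still
 * queued there.
 */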
2220 | void free_transhuge_page(struct page *page) |
2221 | { |
2222 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); |
2223 | unsigned long flags; |
2224 | |
2225 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
2226 | if (!list_empty(page_deferred_list(page))) { |
2227 | pgdata->split_queue_len--; |
2228 | list_del(page_deferred_list(page)); |
2229 | } |
2230 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
2231 | free_compound_page(page); |
2232 | } |
2233 | |
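/*
 * Queue a THP on the per-node split_queue so the deferred split shrinker
 * can split it later under memory pressure.
 */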
2234 | void deferred_split_huge_page(struct page *page) |
2235 | { |
2236 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); |
2237 | unsigned long flags; |
2238 | |
2239 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
2240 | |
2241 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
2242 | if (list_empty(page_deferred_list(page))) { |
2243 | count_vm_event(THP_DEFERRED_SPLIT_PAGE); |
2244 | list_add_tail(page_deferred_list(page), &pgdata->split_queue); |
2245 | pgdata->split_queue_len++; |
2246 | } |
2247 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
2248 | } |
2249 | |
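/* Shrinker ->count_objects: report the length of this node's split queue. */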
2250 | static unsigned long deferred_split_count(struct shrinker *shrink, |
2251 | struct shrink_control *sc) |
2252 | { |
2253 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
2254 | return READ_ONCE(pgdata->split_queue_len);
2255 | } |
2256 | |
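/*
 * Shrinker ->scan_objects: take up to nr_to_scan pages off this node's
 * split queue, try to split each one, and put whatever could not be split
 * back on the queue.
 */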
2257 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
2258 | struct shrink_control *sc) |
2259 | { |
2260 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
2261 | unsigned long flags; |
2262 | LIST_HEAD(list), *pos, *next; |
2263 | struct page *page; |
2264 | int split = 0; |
2265 | |
2266 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
2267 | /* Take pin on all head pages to avoid freeing them under us */ |
2268 | list_for_each_safe(pos, next, &pgdata->split_queue) { |
2269 | page = list_entry((void *)pos, struct page, mapping); |
2270 | page = compound_head(page); |
2271 | if (get_page_unless_zero(page)) { |
2272 | list_move(page_deferred_list(page), &list); |
2273 | } else { |
2274 | /* We lost race with put_compound_page() */ |
2275 | list_del_init(page_deferred_list(page)); |
2276 | pgdata->split_queue_len--; |
2277 | } |
2278 | if (!--sc->nr_to_scan) |
2279 | break; |
2280 | } |
2281 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
2282 | |
2283 | list_for_each_safe(pos, next, &list) { |
2284 | page = list_entry((void *)pos, struct page, mapping); |
2285 | if (!trylock_page(page)) |
2286 | goto next; |
2287 | /* split_huge_page() removes page from list on success */ |
2288 | if (!split_huge_page(page)) |
2289 | split++; |
2290 | unlock_page(page); |
2291 | next: |
2292 | put_page(page); |
2293 | } |
2294 | |
2295 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
2296 | list_splice_tail(&list, &pgdata->split_queue); |
2297 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
2298 | |
2299 | /* |
2300 | * Stop shrinker if we didn't split any page, but the queue is empty. |
2301 | * This can happen if pages were freed under us. |
2302 | */ |
2303 | if (!split && list_empty(&pgdata->split_queue)) |
2304 | return SHRINK_STOP; |
2305 | return split; |
2306 | } |
2307 | |
2308 | static struct shrinker deferred_split_shrinker = { |
2309 | .count_objects = deferred_split_count, |
2310 | .scan_objects = deferred_split_scan, |
2311 | .seeks = DEFAULT_SEEKS, |
2312 | .flags = SHRINKER_NUMA_AWARE, |
2313 | }; |
2314 | |
2315 | #ifdef CONFIG_DEBUG_FS |
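/*
 * Writing 1 to the split_huge_pages debugfs file (usually mounted at
 * /sys/kernel/debug/split_huge_pages) walks every populated zone and tries
 * to split every THP currently on the LRU; any other value is -EINVAL.
 * Intended for testing.
 */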
2316 | static int split_huge_pages_set(void *data, u64 val) |
2317 | { |
2318 | struct zone *zone; |
2319 | struct page *page; |
2320 | unsigned long pfn, max_zone_pfn; |
2321 | unsigned long total = 0, split = 0; |
2322 | |
2323 | if (val != 1) |
2324 | return -EINVAL; |
2325 | |
2326 | for_each_populated_zone(zone) { |
2327 | max_zone_pfn = zone_end_pfn(zone); |
2328 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { |
2329 | if (!pfn_valid(pfn)) |
2330 | continue; |
2331 | |
2332 | page = pfn_to_page(pfn); |
2333 | if (!get_page_unless_zero(page)) |
2334 | continue; |
2335 | |
2336 | if (zone != page_zone(page)) |
2337 | goto next; |
2338 | |
2339 | if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) |
2340 | goto next; |
2341 | |
2342 | total++; |
2343 | lock_page(page); |
2344 | if (!split_huge_page(page)) |
2345 | split++; |
2346 | unlock_page(page); |
2347 | next: |
2348 | put_page(page); |
2349 | } |
2350 | } |
2351 | |
2352 | pr_info("%lu of %lu THP split\n", split, total); |
2353 | |
2354 | return 0; |
2355 | } |
2356 | DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, |
2357 | "%llu\n"); |
2358 | |
2359 | static int __init split_huge_pages_debugfs(void) |
2360 | { |
2361 | void *ret; |
2362 | |
2363 | ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, |
2364 | &split_huge_pages_fops); |
2365 | if (!ret) |
2366 | pr_warn("Failed to create split_huge_pages in debugfs\n");
2367 | return 0; |
2368 | } |
2369 | late_initcall(split_huge_pages_debugfs); |
2370 | #endif |
2371 |