1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2 | |
3 | #include <linux/mm.h> |
4 | #include <linux/sched.h> |
5 | #include <linux/mmu_notifier.h> |
6 | #include <linux/rmap.h> |
7 | #include <linux/swap.h> |
8 | #include <linux/mm_inline.h> |
9 | #include <linux/kthread.h> |
10 | #include <linux/khugepaged.h> |
11 | #include <linux/freezer.h> |
12 | #include <linux/mman.h> |
13 | #include <linux/hashtable.h> |
14 | #include <linux/userfaultfd_k.h> |
15 | #include <linux/page_idle.h> |
16 | #include <linux/swapops.h> |
17 | #include <linux/shmem_fs.h> |
18 | |
19 | #include <asm/tlb.h> |
20 | #include <asm/pgalloc.h> |
21 | #include "internal.h" |
22 | |
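/*
 * Outcome codes for the scan and collapse paths below; they are also
 * reported through the huge_memory tracepoints.
 */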
23 | enum scan_result { |
24 | SCAN_FAIL, |
25 | SCAN_SUCCEED, |
26 | SCAN_PMD_NULL, |
27 | SCAN_EXCEED_NONE_PTE, |
28 | SCAN_PTE_NON_PRESENT, |
29 | SCAN_PAGE_RO, |
30 | SCAN_LACK_REFERENCED_PAGE, |
31 | SCAN_PAGE_NULL, |
32 | SCAN_SCAN_ABORT, |
33 | SCAN_PAGE_COUNT, |
34 | SCAN_PAGE_LRU, |
35 | SCAN_PAGE_LOCK, |
36 | SCAN_PAGE_ANON, |
37 | SCAN_PAGE_COMPOUND, |
38 | SCAN_ANY_PROCESS, |
39 | SCAN_VMA_NULL, |
40 | SCAN_VMA_CHECK, |
41 | SCAN_ADDRESS_RANGE, |
42 | SCAN_SWAP_CACHE_PAGE, |
43 | SCAN_DEL_PAGE_LRU, |
44 | SCAN_ALLOC_HUGE_PAGE_FAIL, |
45 | SCAN_CGROUP_CHARGE_FAIL, |
46 | SCAN_EXCEED_SWAP_PTE, |
47 | SCAN_TRUNCATED, |
48 | }; |
49 | |
50 | #define CREATE_TRACE_POINTS |
51 | #include <trace/events/huge_memory.h> |
52 | |
53 | /* default: scan 8*512 ptes (or vmas) every 10 seconds */ |
54 | static unsigned int khugepaged_pages_to_scan __read_mostly; |
55 | static unsigned int khugepaged_pages_collapsed; |
56 | static unsigned int khugepaged_full_scans; |
57 | static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; |
58 | /* during fragmentation poll the hugepage allocator once every minute */ |
59 | static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; |
60 | static unsigned long khugepaged_sleep_expire; |
61 | static DEFINE_SPINLOCK(khugepaged_mm_lock); |
62 | static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); |
63 | /* |
64 | * By default, collapse hugepages if there is at least one pte mapped, |
65 | * as would have happened if the vma had been large enough during the |
66 | * page fault. |
67 | */ |
68 | static unsigned int khugepaged_max_ptes_none __read_mostly; |
69 | static unsigned int khugepaged_max_ptes_swap __read_mostly; |
70 | |
71 | #define MM_SLOTS_HASH_BITS 10 |
72 | static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); |
73 | |
74 | static struct kmem_cache *mm_slot_cache __read_mostly; |
75 | |
76 | /** |
77 | * struct mm_slot - hash lookup from mm to mm_slot |
78 | * @hash: hash collision list |
79 | * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head |
80 | * @mm: the mm that this information is valid for |
81 | */ |
82 | struct mm_slot { |
83 | struct hlist_node hash; |
84 | struct list_head mm_node; |
85 | struct mm_struct *mm; |
86 | }; |
87 | |
88 | /** |
89 | * struct khugepaged_scan - cursor for scanning |
90 | * @mm_head: the head of the mm list to scan |
91 | * @mm_slot: the current mm_slot we are scanning |
92 | * @address: the next address inside that to be scanned |
93 | * |
94 | * There is only the one khugepaged_scan instance of this cursor structure. |
95 | */ |
96 | struct khugepaged_scan { |
97 | struct list_head mm_head; |
98 | struct mm_slot *mm_slot; |
99 | unsigned long address; |
100 | }; |
101 | |
102 | static struct khugepaged_scan khugepaged_scan = { |
103 | .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), |
104 | }; |
105 | |
106 | #ifdef CONFIG_SYSFS |
107 | static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, |
108 | struct kobj_attribute *attr, |
109 | char *buf) |
110 | { |
111 | return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); |
112 | } |
113 | |
114 | static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, |
115 | struct kobj_attribute *attr, |
116 | const char *buf, size_t count) |
117 | { |
118 | unsigned long msecs; |
119 | int err; |
120 | |
121 | err = kstrtoul(buf, 10, &msecs); |
122 | if (err || msecs > UINT_MAX) |
123 | return -EINVAL; |
124 | |
125 | khugepaged_scan_sleep_millisecs = msecs; |
126 | khugepaged_sleep_expire = 0; |
127 | wake_up_interruptible(&khugepaged_wait); |
128 | |
129 | return count; |
130 | } |
131 | static struct kobj_attribute scan_sleep_millisecs_attr = |
132 | __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, |
133 | scan_sleep_millisecs_store); |
134 | |
135 | static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, |
136 | struct kobj_attribute *attr, |
137 | char *buf) |
138 | { |
139 | return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); |
140 | } |
141 | |
142 | static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, |
143 | struct kobj_attribute *attr, |
144 | const char *buf, size_t count) |
145 | { |
146 | unsigned long msecs; |
147 | int err; |
148 | |
149 | err = kstrtoul(buf, 10, &msecs); |
150 | if (err || msecs > UINT_MAX) |
151 | return -EINVAL; |
152 | |
153 | khugepaged_alloc_sleep_millisecs = msecs; |
154 | khugepaged_sleep_expire = 0; |
155 | wake_up_interruptible(&khugepaged_wait); |
156 | |
157 | return count; |
158 | } |
159 | static struct kobj_attribute alloc_sleep_millisecs_attr = |
160 | __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, |
161 | alloc_sleep_millisecs_store); |
162 | |
163 | static ssize_t pages_to_scan_show(struct kobject *kobj, |
164 | struct kobj_attribute *attr, |
165 | char *buf) |
166 | { |
167 | return sprintf(buf, "%u\n", khugepaged_pages_to_scan); |
168 | } |
169 | static ssize_t pages_to_scan_store(struct kobject *kobj, |
170 | struct kobj_attribute *attr, |
171 | const char *buf, size_t count) |
172 | { |
173 | int err; |
174 | unsigned long pages; |
175 | |
176 | err = kstrtoul(buf, 10, &pages); |
177 | if (err || !pages || pages > UINT_MAX) |
178 | return -EINVAL; |
179 | |
180 | khugepaged_pages_to_scan = pages; |
181 | |
182 | return count; |
183 | } |
184 | static struct kobj_attribute pages_to_scan_attr = |
185 | __ATTR(pages_to_scan, 0644, pages_to_scan_show, |
186 | pages_to_scan_store); |
187 | |
188 | static ssize_t pages_collapsed_show(struct kobject *kobj, |
189 | struct kobj_attribute *attr, |
190 | char *buf) |
191 | { |
192 | return sprintf(buf, "%u\n", khugepaged_pages_collapsed); |
193 | } |
194 | static struct kobj_attribute pages_collapsed_attr = |
195 | __ATTR_RO(pages_collapsed); |
196 | |
197 | static ssize_t full_scans_show(struct kobject *kobj, |
198 | struct kobj_attribute *attr, |
199 | char *buf) |
200 | { |
201 | return sprintf(buf, "%u\n", khugepaged_full_scans); |
202 | } |
203 | static struct kobj_attribute full_scans_attr = |
204 | __ATTR_RO(full_scans); |
205 | |
206 | static ssize_t khugepaged_defrag_show(struct kobject *kobj, |
207 | struct kobj_attribute *attr, char *buf) |
208 | { |
209 | return single_hugepage_flag_show(kobj, attr, buf, |
210 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); |
211 | } |
212 | static ssize_t khugepaged_defrag_store(struct kobject *kobj, |
213 | struct kobj_attribute *attr, |
214 | const char *buf, size_t count) |
215 | { |
216 | return single_hugepage_flag_store(kobj, attr, buf, count, |
217 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); |
218 | } |
219 | static struct kobj_attribute khugepaged_defrag_attr = |
220 | __ATTR(defrag, 0644, khugepaged_defrag_show, |
221 | khugepaged_defrag_store); |
222 | |
223 | /* |
224 | * max_ptes_none controls whether khugepaged should collapse hugepages |
225 | * over unmapped ptes, which potentially increases the memory |
226 | * footprint of the vmas. When max_ptes_none is 0, khugepaged will not |
227 | * reduce the free memory available in the system as it |
228 | * runs. Increasing max_ptes_none will instead potentially reduce |
229 | * free memory in the system during the khugepaged scan. |
230 | */ |
231 | static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, |
232 | struct kobj_attribute *attr, |
233 | char *buf) |
234 | { |
235 | return sprintf(buf, "%u\n", khugepaged_max_ptes_none); |
236 | } |
237 | static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, |
238 | struct kobj_attribute *attr, |
239 | const char *buf, size_t count) |
240 | { |
241 | int err; |
242 | unsigned long max_ptes_none; |
243 | |
244 | err = kstrtoul(buf, 10, &max_ptes_none); |
245 | if (err || max_ptes_none > HPAGE_PMD_NR-1) |
246 | return -EINVAL; |
247 | |
248 | khugepaged_max_ptes_none = max_ptes_none; |
249 | |
250 | return count; |
251 | } |
252 | static struct kobj_attribute khugepaged_max_ptes_none_attr = |
253 | __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, |
254 | khugepaged_max_ptes_none_store); |
255 | |
256 | static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, |
257 | struct kobj_attribute *attr, |
258 | char *buf) |
259 | { |
260 | return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); |
261 | } |
262 | |
263 | static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, |
264 | struct kobj_attribute *attr, |
265 | const char *buf, size_t count) |
266 | { |
267 | int err; |
268 | unsigned long max_ptes_swap; |
269 | |
270 | err = kstrtoul(buf, 10, &max_ptes_swap); |
271 | if (err || max_ptes_swap > HPAGE_PMD_NR-1) |
272 | return -EINVAL; |
273 | |
274 | khugepaged_max_ptes_swap = max_ptes_swap; |
275 | |
276 | return count; |
277 | } |
278 | |
279 | static struct kobj_attribute khugepaged_max_ptes_swap_attr = |
280 | __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, |
281 | khugepaged_max_ptes_swap_store); |
282 | |
283 | static struct attribute *khugepaged_attr[] = { |
284 | &khugepaged_defrag_attr.attr, |
285 | &khugepaged_max_ptes_none_attr.attr, |
286 | &pages_to_scan_attr.attr, |
287 | &pages_collapsed_attr.attr, |
288 | &full_scans_attr.attr, |
289 | &scan_sleep_millisecs_attr.attr, |
290 | &alloc_sleep_millisecs_attr.attr, |
291 | &khugepaged_max_ptes_swap_attr.attr, |
292 | NULL, |
293 | }; |
294 | |
295 | struct attribute_group khugepaged_attr_group = { |
296 | .attrs = khugepaged_attr, |
297 | .name = "khugepaged", |
298 | }; |
299 | #endif /* CONFIG_SYSFS */ |
300 | |
301 | #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) |
302 | |
303 | int hugepage_madvise(struct vm_area_struct *vma, |
304 | unsigned long *vm_flags, int advice) |
305 | { |
306 | switch (advice) { |
307 | case MADV_HUGEPAGE: |
308 | #ifdef CONFIG_S390 |
309 | /* |
310 | * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 |
311 | * can't handle this properly after s390_enable_sie, so we simply |
312 | * ignore the madvise to prevent qemu from causing a SIGSEGV. |
313 | */ |
314 | if (mm_has_pgste(vma->vm_mm)) |
315 | return 0; |
316 | #endif |
317 | *vm_flags &= ~VM_NOHUGEPAGE; |
318 | *vm_flags |= VM_HUGEPAGE; |
319 | /* |
320 | * If the vma becomes suitable for khugepaged to scan, |
321 | * register it here without waiting for a page fault that |
322 | * may not happen any time soon. |
323 | */ |
324 | if (!(*vm_flags & VM_NO_KHUGEPAGED) && |
325 | khugepaged_enter_vma_merge(vma, *vm_flags)) |
326 | return -ENOMEM; |
327 | break; |
328 | case MADV_NOHUGEPAGE: |
329 | *vm_flags &= ~VM_HUGEPAGE; |
330 | *vm_flags |= VM_NOHUGEPAGE; |
331 | /* |
332 | * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning |
333 | * this vma, even if the mm stays registered in khugepaged |
334 | * because it was registered before VM_NOHUGEPAGE was set. |
335 | */ |
336 | break; |
337 | } |
338 | |
339 | return 0; |
340 | } |
341 | |
342 | int __init khugepaged_init(void) |
343 | { |
344 | mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", |
345 | sizeof(struct mm_slot), |
346 | __alignof__(struct mm_slot), 0, NULL); |
347 | if (!mm_slot_cache) |
348 | return -ENOMEM; |
349 | |
350 | khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; |
351 | khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; |
352 | khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; |
353 | |
354 | return 0; |
355 | } |
356 | |
357 | void __init khugepaged_destroy(void) |
358 | { |
359 | kmem_cache_destroy(mm_slot_cache); |
360 | } |
361 | |
362 | static inline struct mm_slot *alloc_mm_slot(void) |
363 | { |
364 | if (!mm_slot_cache) /* initialization failed */ |
365 | return NULL; |
366 | return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); |
367 | } |
368 | |
369 | static inline void free_mm_slot(struct mm_slot *mm_slot) |
370 | { |
371 | kmem_cache_free(mm_slot_cache, mm_slot); |
372 | } |
373 | |
374 | static struct mm_slot *get_mm_slot(struct mm_struct *mm) |
375 | { |
376 | struct mm_slot *mm_slot; |
377 | |
378 | hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) |
379 | if (mm == mm_slot->mm) |
380 | return mm_slot; |
381 | |
382 | return NULL; |
383 | } |
384 | |
385 | static void insert_to_mm_slots_hash(struct mm_struct *mm, |
386 | struct mm_slot *mm_slot) |
387 | { |
388 | mm_slot->mm = mm; |
389 | hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); |
390 | } |
391 | |
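/*
 * The mm is considered exiting once its last user is gone
 * (mm_users == 0); khugepaged must stop touching its page tables then.
 */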
392 | static inline int khugepaged_test_exit(struct mm_struct *mm) |
393 | { |
394 | return atomic_read(&mm->mm_users) == 0; |
395 | } |
396 | |
397 | int __khugepaged_enter(struct mm_struct *mm) |
398 | { |
399 | struct mm_slot *mm_slot; |
400 | int wakeup; |
401 | |
402 | mm_slot = alloc_mm_slot(); |
403 | if (!mm_slot) |
404 | return -ENOMEM; |
405 | |
406 | /* __khugepaged_exit() must not run from under us */ |
407 | VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); |
408 | if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { |
409 | free_mm_slot(mm_slot); |
410 | return 0; |
411 | } |
412 | |
413 | spin_lock(&khugepaged_mm_lock); |
414 | insert_to_mm_slots_hash(mm, mm_slot); |
415 | /* |
416 | * Insert just behind the scanning cursor, to let the area settle |
417 | * down a little. |
418 | */ |
419 | wakeup = list_empty(&khugepaged_scan.mm_head); |
420 | list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); |
421 | spin_unlock(&khugepaged_mm_lock); |
422 | |
423 | atomic_inc(&mm->mm_count); |
424 | if (wakeup) |
425 | wake_up_interruptible(&khugepaged_wait); |
426 | |
427 | return 0; |
428 | } |
429 | |
430 | int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
431 | unsigned long vm_flags) |
432 | { |
433 | unsigned long hstart, hend; |
434 | if (!vma->anon_vma) |
435 | /* |
436 | * Not yet faulted in, so we will register later at |
437 | * page fault time if needed. |
438 | */ |
439 | return 0; |
440 | if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) |
441 | /* khugepaged not yet working on file or special mappings */ |
442 | return 0; |
443 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
444 | hend = vma->vm_end & HPAGE_PMD_MASK; |
445 | if (hstart < hend) |
446 | return khugepaged_enter(vma, vm_flags); |
447 | return 0; |
448 | } |
449 | |
450 | void __khugepaged_exit(struct mm_struct *mm) |
451 | { |
452 | struct mm_slot *mm_slot; |
453 | int free = 0; |
454 | |
455 | spin_lock(&khugepaged_mm_lock); |
456 | mm_slot = get_mm_slot(mm); |
457 | if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { |
458 | hash_del(&mm_slot->hash); |
459 | list_del(&mm_slot->mm_node); |
460 | free = 1; |
461 | } |
462 | spin_unlock(&khugepaged_mm_lock); |
463 | |
464 | if (free) { |
465 | clear_bit(MMF_VM_HUGEPAGE, &mm->flags); |
466 | free_mm_slot(mm_slot); |
467 | mmdrop(mm); |
468 | } else if (mm_slot) { |
469 | /* |
470 | * This is required to serialize against |
471 | * khugepaged_test_exit() (which is guaranteed to run |
472 | * under mmap sem read mode). Stop here (after we |
473 | * return, all pagetables will be destroyed) until |
474 | * khugepaged has finished working on the pagetables |
475 | * under the mmap_sem. |
476 | */ |
477 | down_write(&mm->mmap_sem); |
478 | up_write(&mm->mmap_sem); |
479 | } |
480 | } |
481 | |
482 | static void release_pte_page(struct page *page) |
483 | { |
484 | /* 0 stands for page_is_file_cache(page) == false */ |
485 | dec_node_page_state(page, NR_ISOLATED_ANON + 0); |
486 | unlock_page(page); |
487 | putback_lru_page(page); |
488 | } |
489 | |
490 | static void release_pte_pages(pte_t *pte, pte_t *_pte) |
491 | { |
492 | while (--_pte >= pte) { |
493 | pte_t pteval = *_pte; |
494 | if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) |
495 | release_pte_page(pte_page(pteval)); |
496 | } |
497 | } |
498 | |
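/*
 * Lock every page mapped in the pmd range, check that it is a
 * collapsible anonymous page (present, not compound, not pinned by
 * gup) and isolate it from the LRU.  Returns 1 with all pages locked
 * and isolated; on failure the pages already taken are put back and
 * 0 is returned.
 */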
499 | static int __collapse_huge_page_isolate(struct vm_area_struct *vma, |
500 | unsigned long address, |
501 | pte_t *pte) |
502 | { |
503 | struct page *page = NULL; |
504 | pte_t *_pte; |
505 | int none_or_zero = 0, result = 0, referenced = 0; |
506 | bool writable = false; |
507 | |
508 | for (_pte = pte; _pte < pte+HPAGE_PMD_NR; |
509 | _pte++, address += PAGE_SIZE) { |
510 | pte_t pteval = *_pte; |
511 | if (pte_none(pteval) || (pte_present(pteval) && |
512 | is_zero_pfn(pte_pfn(pteval)))) { |
513 | if (!userfaultfd_armed(vma) && |
514 | ++none_or_zero <= khugepaged_max_ptes_none) { |
515 | continue; |
516 | } else { |
517 | result = SCAN_EXCEED_NONE_PTE; |
518 | goto out; |
519 | } |
520 | } |
521 | if (!pte_present(pteval)) { |
522 | result = SCAN_PTE_NON_PRESENT; |
523 | goto out; |
524 | } |
525 | page = vm_normal_page(vma, address, pteval); |
526 | if (unlikely(!page)) { |
527 | result = SCAN_PAGE_NULL; |
528 | goto out; |
529 | } |
530 | |
531 | /* TODO: teach khugepaged to collapse THP mapped with pte */ |
532 | if (PageCompound(page)) { |
533 | result = SCAN_PAGE_COMPOUND; |
534 | goto out; |
535 | } |
536 | |
537 | VM_BUG_ON_PAGE(!PageAnon(page), page); |
538 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
539 | |
540 | /* |
541 | * We can do it before isolate_lru_page because the |
542 | * page can't be freed from under us. NOTE: PG_lock |
543 | * is needed to serialize against split_huge_page |
544 | * when invoked from the VM. |
545 | */ |
546 | if (!trylock_page(page)) { |
547 | result = SCAN_PAGE_LOCK; |
548 | goto out; |
549 | } |
550 | |
551 | /* |
552 | * cannot use mapcount: can't collapse if there's a gup pin. |
553 | * The page must only be referenced by the scanned process |
554 | * and page swap cache. |
555 | */ |
556 | if (page_count(page) != 1 + !!PageSwapCache(page)) { |
557 | unlock_page(page); |
558 | result = SCAN_PAGE_COUNT; |
559 | goto out; |
560 | } |
561 | if (pte_write(pteval)) { |
562 | writable = true; |
563 | } else { |
564 | if (PageSwapCache(page) && |
565 | !reuse_swap_page(page, NULL)) { |
566 | unlock_page(page); |
567 | result = SCAN_SWAP_CACHE_PAGE; |
568 | goto out; |
569 | } |
570 | /* |
571 | * Page is not in the swap cache. It can be collapsed |
572 | * into a THP. |
573 | */ |
574 | } |
575 | |
576 | /* |
577 | * Isolate the page to avoid collapsing a hugepage |
578 | * currently in use by the VM. |
579 | */ |
580 | if (isolate_lru_page(page)) { |
581 | unlock_page(page); |
582 | result = SCAN_DEL_PAGE_LRU; |
583 | goto out; |
584 | } |
585 | /* 0 stands for page_is_file_cache(page) == false */ |
586 | inc_node_page_state(page, NR_ISOLATED_ANON + 0); |
587 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
588 | VM_BUG_ON_PAGE(PageLRU(page), page); |
589 | |
590 | /* There should be enough young ptes to collapse the page */ |
591 | if (pte_young(pteval) || |
592 | page_is_young(page) || PageReferenced(page) || |
593 | mmu_notifier_test_young(vma->vm_mm, address)) |
594 | referenced++; |
595 | } |
596 | if (likely(writable)) { |
597 | if (likely(referenced)) { |
598 | result = SCAN_SUCCEED; |
599 | trace_mm_collapse_huge_page_isolate(page, none_or_zero, |
600 | referenced, writable, result); |
601 | return 1; |
602 | } |
603 | } else { |
604 | result = SCAN_PAGE_RO; |
605 | } |
606 | |
607 | out: |
608 | release_pte_pages(pte, _pte); |
609 | trace_mm_collapse_huge_page_isolate(page, none_or_zero, |
610 | referenced, writable, result); |
611 | return 0; |
612 | } |
613 | |
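/*
 * Copy each small page into the corresponding subpage of the huge
 * page, then clear the old pte and drop the rmap and swap cache
 * references.  pte_none/zero-pfn entries are filled with a cleared
 * subpage instead.
 */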
614 | static void __collapse_huge_page_copy(pte_t *pte, struct page *page, |
615 | struct vm_area_struct *vma, |
616 | unsigned long address, |
617 | spinlock_t *ptl) |
618 | { |
619 | pte_t *_pte; |
620 | for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { |
621 | pte_t pteval = *_pte; |
622 | struct page *src_page; |
623 | |
624 | if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { |
625 | clear_user_highpage(page, address); |
626 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); |
627 | if (is_zero_pfn(pte_pfn(pteval))) { |
628 | /* |
629 | * ptl mostly unnecessary. |
630 | */ |
631 | spin_lock(ptl); |
632 | /* |
633 | * paravirt calls inside pte_clear here are |
634 | * superfluous. |
635 | */ |
636 | pte_clear(vma->vm_mm, address, _pte); |
637 | spin_unlock(ptl); |
638 | } |
639 | } else { |
640 | src_page = pte_page(pteval); |
641 | copy_user_highpage(page, src_page, address, vma); |
642 | VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); |
643 | release_pte_page(src_page); |
644 | /* |
645 | * ptl mostly unnecessary, but preempt has to |
646 | * be disabled to update the per-cpu stats |
647 | * inside page_remove_rmap(). |
648 | */ |
649 | spin_lock(ptl); |
650 | /* |
651 | * paravirt calls inside pte_clear here are |
652 | * superfluous. |
653 | */ |
654 | pte_clear(vma->vm_mm, address, _pte); |
655 | page_remove_rmap(src_page, false); |
656 | spin_unlock(ptl); |
657 | free_page_and_swap_cache(src_page); |
658 | } |
659 | |
660 | address += PAGE_SIZE; |
661 | page++; |
662 | } |
663 | } |
664 | |
665 | static void khugepaged_alloc_sleep(void) |
666 | { |
667 | DEFINE_WAIT(wait); |
668 | |
669 | add_wait_queue(&khugepaged_wait, &wait); |
670 | freezable_schedule_timeout_interruptible( |
671 | msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); |
672 | remove_wait_queue(&khugepaged_wait, &wait); |
673 | } |
674 | |
675 | static int khugepaged_node_load[MAX_NUMNODES]; |
676 | |
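/*
 * With node_reclaim_mode enabled, abort the scan of this pmd if the
 * page sits on a node further than RECLAIM_DISTANCE from any node
 * already recorded in khugepaged_node_load[], to avoid collapsing
 * pages spread across distant nodes.
 */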
677 | static bool khugepaged_scan_abort(int nid) |
678 | { |
679 | int i; |
680 | |
681 | /* |
682 | * If node_reclaim_mode is disabled, then no extra effort is made to |
683 | * allocate memory locally. |
684 | */ |
685 | if (!node_reclaim_mode) |
686 | return false; |
687 | |
688 | /* If there is a count for this node already, it must be acceptable */ |
689 | if (khugepaged_node_load[nid]) |
690 | return false; |
691 | |
692 | for (i = 0; i < MAX_NUMNODES; i++) { |
693 | if (!khugepaged_node_load[i]) |
694 | continue; |
695 | if (node_distance(nid, i) > RECLAIM_DISTANCE) |
696 | return true; |
697 | } |
698 | return false; |
699 | } |
700 | |
701 | /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ |
702 | static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) |
703 | { |
704 | return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT; |
705 | } |
706 | |
707 | #ifdef CONFIG_NUMA |
708 | static int khugepaged_find_target_node(void) |
709 | { |
710 | static int last_khugepaged_target_node = NUMA_NO_NODE; |
711 | int nid, target_node = 0, max_value = 0; |
712 | |
713 | /* find the first node with the most normal pages hit */ |
714 | for (nid = 0; nid < MAX_NUMNODES; nid++) |
715 | if (khugepaged_node_load[nid] > max_value) { |
716 | max_value = khugepaged_node_load[nid]; |
717 | target_node = nid; |
718 | } |
719 | |
720 | /* do some balancing if several nodes have the same hit count */ |
721 | if (target_node <= last_khugepaged_target_node) |
722 | for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; |
723 | nid++) |
724 | if (max_value == khugepaged_node_load[nid]) { |
725 | target_node = nid; |
726 | break; |
727 | } |
728 | |
729 | last_khugepaged_target_node = target_node; |
730 | return target_node; |
731 | } |
732 | |
733 | static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) |
734 | { |
735 | if (IS_ERR(*hpage)) { |
736 | if (!*wait) |
737 | return false; |
738 | |
739 | *wait = false; |
740 | *hpage = NULL; |
741 | khugepaged_alloc_sleep(); |
742 | } else if (*hpage) { |
743 | put_page(*hpage); |
744 | *hpage = NULL; |
745 | } |
746 | |
747 | return true; |
748 | } |
749 | |
750 | static struct page * |
751 | khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) |
752 | { |
753 | VM_BUG_ON_PAGE(*hpage, *hpage); |
754 | |
755 | *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); |
756 | if (unlikely(!*hpage)) { |
757 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); |
758 | *hpage = ERR_PTR(-ENOMEM); |
759 | return NULL; |
760 | } |
761 | |
762 | prep_transhuge_page(*hpage); |
763 | count_vm_event(THP_COLLAPSE_ALLOC); |
764 | return *hpage; |
765 | } |
766 | #else |
767 | static int khugepaged_find_target_node(void) |
768 | { |
769 | return 0; |
770 | } |
771 | |
772 | static inline struct page *alloc_khugepaged_hugepage(void) |
773 | { |
774 | struct page *page; |
775 | |
776 | page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), |
777 | HPAGE_PMD_ORDER); |
778 | if (page) |
779 | prep_transhuge_page(page); |
780 | return page; |
781 | } |
782 | |
783 | static struct page *khugepaged_alloc_hugepage(bool *wait) |
784 | { |
785 | struct page *hpage; |
786 | |
787 | do { |
788 | hpage = alloc_khugepaged_hugepage(); |
789 | if (!hpage) { |
790 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); |
791 | if (!*wait) |
792 | return NULL; |
793 | |
794 | *wait = false; |
795 | khugepaged_alloc_sleep(); |
796 | } else |
797 | count_vm_event(THP_COLLAPSE_ALLOC); |
798 | } while (unlikely(!hpage) && likely(khugepaged_enabled())); |
799 | |
800 | return hpage; |
801 | } |
802 | |
803 | static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) |
804 | { |
805 | if (!*hpage) |
806 | *hpage = khugepaged_alloc_hugepage(wait); |
807 | |
808 | if (unlikely(!*hpage)) |
809 | return false; |
810 | |
811 | return true; |
812 | } |
813 | |
814 | static struct page * |
815 | khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) |
816 | { |
817 | VM_BUG_ON(!*hpage); |
818 | |
819 | return *hpage; |
820 | } |
821 | #endif |
822 | |
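/*
 * Return true if khugepaged is allowed to collapse pages in this vma:
 * hugepages must be enabled for the vma (always or madvised), shmem
 * mappings need CONFIG_TRANSPARENT_HUGE_PAGECACHE and hugepage-aligned
 * placement, and anonymous vmas must not be special mappings or a
 * temporary stack.
 */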
823 | static bool hugepage_vma_check(struct vm_area_struct *vma) |
824 | { |
825 | if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || |
826 | (vma->vm_flags & VM_NOHUGEPAGE)) |
827 | return false; |
828 | if (shmem_file(vma->vm_file)) { |
829 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) |
830 | return false; |
831 | return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, |
832 | HPAGE_PMD_NR); |
833 | } |
834 | if (!vma->anon_vma || vma->vm_ops) |
835 | return false; |
836 | if (is_vma_temporary_stack(vma)) |
837 | return false; |
838 | return !(vma->vm_flags & VM_NO_KHUGEPAGED); |
839 | } |
840 | |
841 | /* |
842 | * If mmap_sem was temporarily dropped, revalidate the vma |
843 | * after re-taking it. |
844 | * Returns 0 on success, otherwise a non-zero scan_result |
845 | * code. |
846 | */ |
847 | |
848 | static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, |
849 | struct vm_area_struct **vmap) |
850 | { |
851 | struct vm_area_struct *vma; |
852 | unsigned long hstart, hend; |
853 | |
854 | if (unlikely(khugepaged_test_exit(mm))) |
855 | return SCAN_ANY_PROCESS; |
856 | |
857 | *vmap = vma = find_vma(mm, address); |
858 | if (!vma) |
859 | return SCAN_VMA_NULL; |
860 | |
861 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
862 | hend = vma->vm_end & HPAGE_PMD_MASK; |
863 | if (address < hstart || address + HPAGE_PMD_SIZE > hend) |
864 | return SCAN_ADDRESS_RANGE; |
865 | if (!hugepage_vma_check(vma)) |
866 | return SCAN_VMA_CHECK; |
867 | return 0; |
868 | } |
869 | |
870 | /* |
871 | * Bring missing pages in from swap, to complete THP collapse. |
872 | * Only done if khugepaged_scan_pmd believes it is worthwhile. |
873 | * |
874 | * Called and returns without pte mapped or spinlocks held, |
875 | * but with mmap_sem held to protect against vma changes. |
876 | */ |
877 | |
878 | static bool __collapse_huge_page_swapin(struct mm_struct *mm, |
879 | struct vm_area_struct *vma, |
880 | unsigned long address, pmd_t *pmd, |
881 | int referenced) |
882 | { |
883 | pte_t pteval; |
884 | int swapped_in = 0, ret = 0; |
885 | struct fault_env fe = { |
886 | .vma = vma, |
887 | .address = address, |
888 | .flags = FAULT_FLAG_ALLOW_RETRY, |
889 | .pmd = pmd, |
890 | }; |
891 | |
892 | /* we only decide to swap in if there are enough young ptes */ |
893 | if (referenced < HPAGE_PMD_NR/2) { |
894 | trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); |
895 | return false; |
896 | } |
897 | fe.pte = pte_offset_map(pmd, address); |
898 | for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; |
899 | fe.pte++, fe.address += PAGE_SIZE) { |
900 | pteval = *fe.pte; |
901 | if (!is_swap_pte(pteval)) |
902 | continue; |
903 | swapped_in++; |
904 | ret = do_swap_page(&fe, pteval); |
905 | |
906 | /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ |
907 | if (ret & VM_FAULT_RETRY) { |
908 | down_read(&mm->mmap_sem); |
909 | if (hugepage_vma_revalidate(mm, address, &fe.vma)) { |
910 | /* vma is no longer available, don't continue to swapin */ |
911 | trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); |
912 | return false; |
913 | } |
914 | /* check if the pmd is still valid */ |
915 | if (mm_find_pmd(mm, address) != pmd) |
916 | return false; |
917 | } |
918 | if (ret & VM_FAULT_ERROR) { |
919 | trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); |
920 | return false; |
921 | } |
922 | /* pte is unmapped now, we need to map it */ |
923 | fe.pte = pte_offset_map(pmd, fe.address); |
924 | } |
925 | fe.pte--; |
926 | pte_unmap(fe.pte); |
927 | trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); |
928 | return true; |
929 | } |
930 | |
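/*
 * Collapse a pmd range into a freshly allocated huge page: allocate
 * and charge the new page with mmap_sem dropped, re-take mmap_sem and
 * swap in missing pages, then under the write mmap_sem and anon_vma
 * lock clear the old pmd, isolate and copy the small pages and
 * install the huge pmd.  Returns with mmap_sem released.
 */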
931 | static void collapse_huge_page(struct mm_struct *mm, |
932 | unsigned long address, |
933 | struct page **hpage, |
934 | int node, int referenced) |
935 | { |
936 | pmd_t *pmd, _pmd; |
937 | pte_t *pte; |
938 | pgtable_t pgtable; |
939 | struct page *new_page; |
940 | spinlock_t *pmd_ptl, *pte_ptl; |
941 | int isolated = 0, result = 0; |
942 | struct mem_cgroup *memcg; |
943 | struct vm_area_struct *vma; |
944 | unsigned long mmun_start; /* For mmu_notifiers */ |
945 | unsigned long mmun_end; /* For mmu_notifiers */ |
946 | gfp_t gfp; |
947 | |
948 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
949 | |
950 | /* Only allocate from the target node */ |
951 | gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; |
952 | |
953 | /* |
954 | * Before allocating the hugepage, release the mmap_sem read lock. |
955 | * The allocation can take potentially a long time if it involves |
956 | * sync compaction, and we do not need to hold the mmap_sem during |
957 | * that. We will recheck the vma after taking it again in write mode. |
958 | */ |
959 | up_read(&mm->mmap_sem); |
960 | new_page = khugepaged_alloc_page(hpage, gfp, node); |
961 | if (!new_page) { |
962 | result = SCAN_ALLOC_HUGE_PAGE_FAIL; |
963 | goto out_nolock; |
964 | } |
965 | |
966 | /* Do not oom kill for khugepaged charges */ |
967 | if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY, |
968 | &memcg, true))) { |
969 | result = SCAN_CGROUP_CHARGE_FAIL; |
970 | goto out_nolock; |
971 | } |
972 | |
973 | down_read(&mm->mmap_sem); |
974 | result = hugepage_vma_revalidate(mm, address, &vma); |
975 | if (result) { |
976 | mem_cgroup_cancel_charge(new_page, memcg, true); |
977 | up_read(&mm->mmap_sem); |
978 | goto out_nolock; |
979 | } |
980 | |
981 | pmd = mm_find_pmd(mm, address); |
982 | if (!pmd) { |
983 | result = SCAN_PMD_NULL; |
984 | mem_cgroup_cancel_charge(new_page, memcg, true); |
985 | up_read(&mm->mmap_sem); |
986 | goto out_nolock; |
987 | } |
988 | |
989 | /* |
990 | * __collapse_huge_page_swapin always returns with mmap_sem locked. |
991 | * If it fails, we release mmap_sem and jump out_nolock. |
992 | * Continuing to collapse causes inconsistency. |
993 | */ |
994 | if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { |
995 | mem_cgroup_cancel_charge(new_page, memcg, true); |
996 | up_read(&mm->mmap_sem); |
997 | goto out_nolock; |
998 | } |
999 | |
1000 | up_read(&mm->mmap_sem); |
1001 | /* |
1002 | * Prevent all access to pagetables with the exception of |
1003 | * gup_fast later handled by the ptep_clear_flush and the VM |
1004 | * handled by the anon_vma lock + PG_lock. |
1005 | */ |
1006 | down_write(&mm->mmap_sem); |
1007 | result = hugepage_vma_revalidate(mm, address, &vma); |
1008 | if (result) |
1009 | goto out; |
1010 | /* check if the pmd is still valid */ |
1011 | if (mm_find_pmd(mm, address) != pmd) |
1012 | goto out; |
1013 | |
1014 | anon_vma_lock_write(vma->anon_vma); |
1015 | |
1016 | pte = pte_offset_map(pmd, address); |
1017 | pte_ptl = pte_lockptr(mm, pmd); |
1018 | |
1019 | mmun_start = address; |
1020 | mmun_end = address + HPAGE_PMD_SIZE; |
1021 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
1022 | pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ |
1023 | /* |
1024 | * After this gup_fast can't run anymore. This also removes |
1025 | * any huge TLB entry from the CPU so we won't allow |
1026 | * huge and small TLB entries for the same virtual address |
1027 | * to avoid the risk of CPU bugs in that area. |
1028 | */ |
1029 | _pmd = pmdp_collapse_flush(vma, address, pmd); |
1030 | spin_unlock(pmd_ptl); |
1031 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
1032 | |
1033 | spin_lock(pte_ptl); |
1034 | isolated = __collapse_huge_page_isolate(vma, address, pte); |
1035 | spin_unlock(pte_ptl); |
1036 | |
1037 | if (unlikely(!isolated)) { |
1038 | pte_unmap(pte); |
1039 | spin_lock(pmd_ptl); |
1040 | BUG_ON(!pmd_none(*pmd)); |
1041 | /* |
1042 | * We can only use set_pmd_at when establishing |
1043 | * hugepmds and never for establishing regular pmds that |
1044 | * point to regular pagetables. Use pmd_populate for that |
1045 | */ |
1046 | pmd_populate(mm, pmd, pmd_pgtable(_pmd)); |
1047 | spin_unlock(pmd_ptl); |
1048 | anon_vma_unlock_write(vma->anon_vma); |
1049 | result = SCAN_FAIL; |
1050 | goto out; |
1051 | } |
1052 | |
1053 | /* |
1054 | * All pages are isolated and locked so anon_vma rmap |
1055 | * can't run anymore. |
1056 | */ |
1057 | anon_vma_unlock_write(vma->anon_vma); |
1058 | |
1059 | __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); |
1060 | pte_unmap(pte); |
1061 | __SetPageUptodate(new_page); |
1062 | pgtable = pmd_pgtable(_pmd); |
1063 | |
1064 | _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); |
1065 | _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); |
1066 | |
1067 | /* |
1068 | * spin_lock() below is not the equivalent of smp_wmb(), so |
1069 | * this is needed to avoid the copy_huge_page writes becoming |
1070 | * visible after the set_pmd_at() write. |
1071 | */ |
1072 | smp_wmb(); |
1073 | |
1074 | spin_lock(pmd_ptl); |
1075 | BUG_ON(!pmd_none(*pmd)); |
1076 | page_add_new_anon_rmap(new_page, vma, address, true); |
1077 | mem_cgroup_commit_charge(new_page, memcg, false, true); |
1078 | lru_cache_add_active_or_unevictable(new_page, vma); |
1079 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
1080 | set_pmd_at(mm, address, pmd, _pmd); |
1081 | update_mmu_cache_pmd(vma, address, pmd); |
1082 | spin_unlock(pmd_ptl); |
1083 | |
1084 | *hpage = NULL; |
1085 | |
1086 | khugepaged_pages_collapsed++; |
1087 | result = SCAN_SUCCEED; |
1088 | out_up_write: |
1089 | up_write(&mm->mmap_sem); |
1090 | out_nolock: |
1091 | trace_mm_collapse_huge_page(mm, isolated, result); |
1092 | return; |
1093 | out: |
1094 | mem_cgroup_cancel_charge(new_page, memcg, true); |
1095 | goto out_up_write; |
1096 | } |
1097 | |
1098 | static int khugepaged_scan_pmd(struct mm_struct *mm, |
1099 | struct vm_area_struct *vma, |
1100 | unsigned long address, |
1101 | struct page **hpage) |
1102 | { |
1103 | pmd_t *pmd; |
1104 | pte_t *pte, *_pte; |
1105 | int ret = 0, none_or_zero = 0, result = 0, referenced = 0; |
1106 | struct page *page = NULL; |
1107 | unsigned long _address; |
1108 | spinlock_t *ptl; |
1109 | int node = NUMA_NO_NODE, unmapped = 0; |
1110 | bool writable = false; |
1111 | |
1112 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
1113 | |
1114 | pmd = mm_find_pmd(mm, address); |
1115 | if (!pmd) { |
1116 | result = SCAN_PMD_NULL; |
1117 | goto out; |
1118 | } |
1119 | |
1120 | memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); |
1121 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
1122 | for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; |
1123 | _pte++, _address += PAGE_SIZE) { |
1124 | pte_t pteval = *_pte; |
1125 | if (is_swap_pte(pteval)) { |
1126 | if (++unmapped <= khugepaged_max_ptes_swap) { |
1127 | continue; |
1128 | } else { |
1129 | result = SCAN_EXCEED_SWAP_PTE; |
1130 | goto out_unmap; |
1131 | } |
1132 | } |
1133 | if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { |
1134 | if (!userfaultfd_armed(vma) && |
1135 | ++none_or_zero <= khugepaged_max_ptes_none) { |
1136 | continue; |
1137 | } else { |
1138 | result = SCAN_EXCEED_NONE_PTE; |
1139 | goto out_unmap; |
1140 | } |
1141 | } |
1142 | if (!pte_present(pteval)) { |
1143 | result = SCAN_PTE_NON_PRESENT; |
1144 | goto out_unmap; |
1145 | } |
1146 | if (pte_write(pteval)) |
1147 | writable = true; |
1148 | |
1149 | page = vm_normal_page(vma, _address, pteval); |
1150 | if (unlikely(!page)) { |
1151 | result = SCAN_PAGE_NULL; |
1152 | goto out_unmap; |
1153 | } |
1154 | |
1155 | /* TODO: teach khugepaged to collapse THP mapped with pte */ |
1156 | if (PageCompound(page)) { |
1157 | result = SCAN_PAGE_COMPOUND; |
1158 | goto out_unmap; |
1159 | } |
1160 | |
1161 | /* |
1162 | * Record which node the original page is from and save this |
1163 | * information to khugepaged_node_load[]. |
1164 | * Khugepaged will allocate the hugepage from the node with the |
1165 | * max hit count. |
1166 | */ |
1167 | node = page_to_nid(page); |
1168 | if (khugepaged_scan_abort(node)) { |
1169 | result = SCAN_SCAN_ABORT; |
1170 | goto out_unmap; |
1171 | } |
1172 | khugepaged_node_load[node]++; |
1173 | if (!PageLRU(page)) { |
1174 | result = SCAN_PAGE_LRU; |
1175 | goto out_unmap; |
1176 | } |
1177 | if (PageLocked(page)) { |
1178 | result = SCAN_PAGE_LOCK; |
1179 | goto out_unmap; |
1180 | } |
1181 | if (!PageAnon(page)) { |
1182 | result = SCAN_PAGE_ANON; |
1183 | goto out_unmap; |
1184 | } |
1185 | |
1186 | /* |
1187 | * cannot use mapcount: can't collapse if there's a gup pin. |
1188 | * The page must only be referenced by the scanned process |
1189 | * and page swap cache. |
1190 | */ |
1191 | if (page_count(page) != 1 + !!PageSwapCache(page)) { |
1192 | result = SCAN_PAGE_COUNT; |
1193 | goto out_unmap; |
1194 | } |
1195 | if (pte_young(pteval) || |
1196 | page_is_young(page) || PageReferenced(page) || |
1197 | mmu_notifier_test_young(vma->vm_mm, address)) |
1198 | referenced++; |
1199 | } |
1200 | if (writable) { |
1201 | if (referenced) { |
1202 | result = SCAN_SUCCEED; |
1203 | ret = 1; |
1204 | } else { |
1205 | result = SCAN_LACK_REFERENCED_PAGE; |
1206 | } |
1207 | } else { |
1208 | result = SCAN_PAGE_RO; |
1209 | } |
1210 | out_unmap: |
1211 | pte_unmap_unlock(pte, ptl); |
1212 | if (ret) { |
1213 | node = khugepaged_find_target_node(); |
1214 | /* collapse_huge_page will return with the mmap_sem released */ |
1215 | collapse_huge_page(mm, address, hpage, node, referenced); |
1216 | } |
1217 | out: |
1218 | trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, |
1219 | none_or_zero, result, unmapped); |
1220 | return ret; |
1221 | } |
1222 | |
1223 | static void collect_mm_slot(struct mm_slot *mm_slot) |
1224 | { |
1225 | struct mm_struct *mm = mm_slot->mm; |
1226 | |
1227 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); |
1228 | |
1229 | if (khugepaged_test_exit(mm)) { |
1230 | /* free mm_slot */ |
1231 | hash_del(&mm_slot->hash); |
1232 | list_del(&mm_slot->mm_node); |
1233 | |
1234 | /* |
1235 | * Not strictly needed because the mm exited already. |
1236 | * |
1237 | * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); |
1238 | */ |
1239 | |
1240 | /* khugepaged_mm_lock actually not necessary for the below */ |
1241 | free_mm_slot(mm_slot); |
1242 | mmdrop(mm); |
1243 | } |
1244 | } |
1245 | |
1246 | #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) |
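/*
 * After collapsing a shmem extent, walk every vma that maps it
 * (skipping those with an anon_vma) and drop the now-unneeded pte
 * page table, write mmap_sem permitting, so that the range can be
 * re-faulted as a huge pmd.
 */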
1247 | static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) |
1248 | { |
1249 | struct vm_area_struct *vma; |
1250 | unsigned long addr; |
1251 | pmd_t *pmd, _pmd; |
1252 | |
1253 | i_mmap_lock_write(mapping); |
1254 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
1255 | /* probably overkill */ |
1256 | if (vma->anon_vma) |
1257 | continue; |
1258 | addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); |
1259 | if (addr & ~HPAGE_PMD_MASK) |
1260 | continue; |
1261 | if (vma->vm_end < addr + HPAGE_PMD_SIZE) |
1262 | continue; |
1263 | pmd = mm_find_pmd(vma->vm_mm, addr); |
1264 | if (!pmd) |
1265 | continue; |
1266 | /* |
1267 | * We need exclusive mmap_sem to retract page table. |
1268 | * If trylock fails we would end up with pte-mapped THP after |
1269 | * re-fault. Not ideal, but it's more important to not disturb |
1270 | * the system too much. |
1271 | */ |
1272 | if (down_write_trylock(&vma->vm_mm->mmap_sem)) { |
1273 | spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); |
1274 | /* assume page table is clear */ |
1275 | _pmd = pmdp_collapse_flush(vma, addr, pmd); |
1276 | spin_unlock(ptl); |
1277 | up_write(&vma->vm_mm->mmap_sem); |
1278 | atomic_long_dec(&vma->vm_mm->nr_ptes); |
1279 | pte_free(vma->vm_mm, pmd_pgtable(_pmd)); |
1280 | } |
1281 | } |
1282 | i_mmap_unlock_write(mapping); |
1283 | } |
1284 | |
1285 | /** |
1286 | * collapse_shmem - collapse small tmpfs/shmem pages into a huge one. |
1287 | * |
1288 | * Basic scheme is simple, details are more complex: |
1289 | * - allocate and lock a new huge page; |
1290 | * - scan over radix tree replacing old pages the new one |
1291 | * - scan over the radix tree, replacing old pages with the new one |
1292 | * + fill in gaps; |
1293 | * + keep old pages around in case if rollback is required; |
1294 | *    + keep old pages around in case rollback is required; |
1295 | * - if replacing succeeded: |
1296 | * + free old pages; |
1297 | * + unlock huge page; |
1298 | * - if replacing failed; |
1299 | * - if replacing failed: |
1300 | * + restore gaps in the radix-tree; |
1301 | * + unlock and free huge page; |
1302 | */ |
1303 | static void collapse_shmem(struct mm_struct *mm, |
1304 | struct address_space *mapping, pgoff_t start, |
1305 | struct page **hpage, int node) |
1306 | { |
1307 | gfp_t gfp; |
1308 | struct page *page, *new_page, *tmp; |
1309 | struct mem_cgroup *memcg; |
1310 | pgoff_t index, end = start + HPAGE_PMD_NR; |
1311 | LIST_HEAD(pagelist); |
1312 | struct radix_tree_iter iter; |
1313 | void **slot; |
1314 | int nr_none = 0, result = SCAN_SUCCEED; |
1315 | |
1316 | VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); |
1317 | |
1318 | /* Only allocate from the target node */ |
1319 | gfp = alloc_hugepage_khugepaged_gfpmask() | |
1320 | __GFP_OTHER_NODE | __GFP_THISNODE; |
1321 | |
1322 | new_page = khugepaged_alloc_page(hpage, gfp, node); |
1323 | if (!new_page) { |
1324 | result = SCAN_ALLOC_HUGE_PAGE_FAIL; |
1325 | goto out; |
1326 | } |
1327 | |
1328 | /* Do not oom kill for khugepaged charges */ |
1329 | if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY, |
1330 | &memcg, true))) { |
1331 | result = SCAN_CGROUP_CHARGE_FAIL; |
1332 | goto out; |
1333 | } |
1334 | |
1335 | __SetPageLocked(new_page); |
1336 | __SetPageSwapBacked(new_page); |
1337 | new_page->index = start; |
1338 | new_page->mapping = mapping; |
1339 | |
1340 | /* |
1341 | * At this point the new_page is locked and not up-to-date. |
1342 | * It's safe to insert it into the page cache, because nobody would |
1343 | * be able to map it or use it in another way until we unlock it. |
1344 | */ |
1345 | |
1346 | index = start; |
1347 | spin_lock_irq(&mapping->tree_lock); |
1348 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { |
1349 | int n = min(iter.index, end) - index; |
1350 | |
1351 | /* |
1352 | * Stop if extent has been hole-punched, and is now completely |
1353 | * empty (the more obvious i_size_read() check would take an |
1354 | * irq-unsafe seqlock on 32-bit). |
1355 | */ |
1356 | if (n >= HPAGE_PMD_NR) { |
1357 | result = SCAN_TRUNCATED; |
1358 | goto tree_locked; |
1359 | } |
1360 | |
1361 | /* |
1362 | * Handle holes in the radix tree: charge them to shmem and |
1363 | * insert the relevant subpages of new_page into the radix tree. |
1364 | */ |
1365 | if (n && !shmem_charge(mapping->host, n)) { |
1366 | result = SCAN_FAIL; |
1367 | goto tree_locked; |
1368 | } |
1369 | for (; index < min(iter.index, end); index++) { |
1370 | radix_tree_insert(&mapping->page_tree, index, |
1371 | new_page + (index % HPAGE_PMD_NR)); |
1372 | } |
1373 | nr_none += n; |
1374 | |
1375 | /* We are done. */ |
1376 | if (index >= end) |
1377 | break; |
1378 | |
1379 | page = radix_tree_deref_slot_protected(slot, |
1380 | &mapping->tree_lock); |
1381 | if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) { |
1382 | spin_unlock_irq(&mapping->tree_lock); |
1383 | /* swap in or instantiate fallocated page */ |
1384 | if (shmem_getpage(mapping->host, index, &page, |
1385 | SGP_NOHUGE)) { |
1386 | result = SCAN_FAIL; |
1387 | goto tree_unlocked; |
1388 | } |
1389 | } else if (trylock_page(page)) { |
1390 | get_page(page); |
1391 | spin_unlock_irq(&mapping->tree_lock); |
1392 | } else { |
1393 | result = SCAN_PAGE_LOCK; |
1394 | goto tree_locked; |
1395 | } |
1396 | |
1397 | /* |
1398 | * The page must be locked, so we can drop the tree_lock |
1399 | * without racing with truncate. |
1400 | */ |
1401 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
1402 | VM_BUG_ON_PAGE(!PageUptodate(page), page); |
1403 | |
1404 | /* |
1405 | * If file was truncated then extended, or hole-punched, before |
1406 | * we locked the first page, then a THP might be there already. |
1407 | */ |
1408 | if (PageTransCompound(page)) { |
1409 | result = SCAN_PAGE_COMPOUND; |
1410 | goto out_unlock; |
1411 | } |
1412 | |
1413 | if (page_mapping(page) != mapping) { |
1414 | result = SCAN_TRUNCATED; |
1415 | goto out_unlock; |
1416 | } |
1417 | |
1418 | if (isolate_lru_page(page)) { |
1419 | result = SCAN_DEL_PAGE_LRU; |
1420 | goto out_unlock; |
1421 | } |
1422 | |
1423 | if (page_mapped(page)) |
1424 | unmap_mapping_range(mapping, index << PAGE_SHIFT, |
1425 | PAGE_SIZE, 0); |
1426 | |
1427 | spin_lock_irq(&mapping->tree_lock); |
1428 | |
1429 | slot = radix_tree_lookup_slot(&mapping->page_tree, index); |
1430 | VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot, |
1431 | &mapping->tree_lock), page); |
1432 | VM_BUG_ON_PAGE(page_mapped(page), page); |
1433 | |
1434 | /* |
1435 | * The page is expected to have page_count() == 3: |
1436 | * - we hold a pin on it; |
1437 | * - one reference from radix tree; |
1438 | * - one from isolate_lru_page; |
1439 | */ |
1440 | if (!page_ref_freeze(page, 3)) { |
1441 | result = SCAN_PAGE_COUNT; |
1442 | spin_unlock_irq(&mapping->tree_lock); |
1443 | putback_lru_page(page); |
1444 | goto out_unlock; |
1445 | } |
1446 | |
1447 | /* |
1448 | * Add the page to the list to be able to undo the collapse if |
1449 | * something goes wrong. |
1450 | */ |
1451 | list_add_tail(&page->lru, &pagelist); |
1452 | |
1453 | /* Finally, replace with the new page. */ |
1454 | radix_tree_replace_slot(slot, |
1455 | new_page + (index % HPAGE_PMD_NR)); |
1456 | |
1457 | slot = radix_tree_iter_next(&iter); |
1458 | index++; |
1459 | continue; |
1460 | out_unlock: |
1461 | unlock_page(page); |
1462 | put_page(page); |
1463 | goto tree_unlocked; |
1464 | } |
1465 | |
1466 | /* |
1467 | * Handle hole in radix tree at the end of the range. |
1468 | * This code only triggers if there's nothing in radix tree |
1469 | * beyond 'end'. |
1470 | */ |
1471 | if (index < end) { |
1472 | int n = end - index; |
1473 | |
1474 | /* Stop if extent has been truncated, and is now empty */ |
1475 | if (n >= HPAGE_PMD_NR) { |
1476 | result = SCAN_TRUNCATED; |
1477 | goto tree_locked; |
1478 | } |
1479 | if (!shmem_charge(mapping->host, n)) { |
1480 | result = SCAN_FAIL; |
1481 | goto tree_locked; |
1482 | } |
1483 | for (; index < end; index++) { |
1484 | radix_tree_insert(&mapping->page_tree, index, |
1485 | new_page + (index % HPAGE_PMD_NR)); |
1486 | } |
1487 | nr_none += n; |
1488 | } |
1489 | |
1490 | __inc_node_page_state(new_page, NR_SHMEM_THPS); |
1491 | if (nr_none) { |
1492 | struct zone *zone = page_zone(new_page); |
1493 | |
1494 | __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none); |
1495 | __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none); |
1496 | } |
1497 | |
1498 | tree_locked: |
1499 | spin_unlock_irq(&mapping->tree_lock); |
1500 | tree_unlocked: |
1501 | |
1502 | if (result == SCAN_SUCCEED) { |
1503 | /* |
1504 | * Replacing the old pages with the new one has succeeded; now we |
1505 | * need to copy the content and free the old pages. |
1506 | */ |
1507 | index = start; |
1508 | list_for_each_entry_safe(page, tmp, &pagelist, lru) { |
1509 | while (index < page->index) { |
1510 | clear_highpage(new_page + (index % HPAGE_PMD_NR)); |
1511 | index++; |
1512 | } |
1513 | copy_highpage(new_page + (page->index % HPAGE_PMD_NR), |
1514 | page); |
1515 | list_del(&page->lru); |
1516 | page->mapping = NULL; |
1517 | page_ref_unfreeze(page, 1); |
1518 | ClearPageActive(page); |
1519 | ClearPageUnevictable(page); |
1520 | unlock_page(page); |
1521 | put_page(page); |
1522 | index++; |
1523 | } |
1524 | while (index < end) { |
1525 | clear_highpage(new_page + (index % HPAGE_PMD_NR)); |
1526 | index++; |
1527 | } |
1528 | |
1529 | SetPageUptodate(new_page); |
1530 | page_ref_add(new_page, HPAGE_PMD_NR - 1); |
1531 | set_page_dirty(new_page); |
1532 | mem_cgroup_commit_charge(new_page, memcg, false, true); |
1533 | lru_cache_add_anon(new_page); |
1534 | |
1535 | /* |
1536 | * Remove pte page tables, so we can re-fault the page as huge. |
1537 | */ |
1538 | retract_page_tables(mapping, start); |
1539 | *hpage = NULL; |
1540 | } else { |
1541 | /* Something went wrong: roll back the changes to the radix tree */ |
1542 | spin_lock_irq(&mapping->tree_lock); |
1543 | mapping->nrpages -= nr_none; |
1544 | shmem_uncharge(mapping->host, nr_none); |
1545 | |
1546 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, |
1547 | start) { |
1548 | if (iter.index >= end) |
1549 | break; |
1550 | page = list_first_entry_or_null(&pagelist, |
1551 | struct page, lru); |
1552 | if (!page || iter.index < page->index) { |
1553 | if (!nr_none) |
1554 | break; |
1555 | nr_none--; |
1556 | /* Put holes back where they were */ |
1557 | radix_tree_delete(&mapping->page_tree, |
1558 | iter.index); |
1559 | slot = radix_tree_iter_next(&iter); |
1560 | continue; |
1561 | } |
1562 | |
1563 | VM_BUG_ON_PAGE(page->index != iter.index, page); |
1564 | |
1565 | /* Unfreeze the page. */ |
1566 | list_del(&page->lru); |
1567 | page_ref_unfreeze(page, 2); |
1568 | radix_tree_replace_slot(slot, page); |
1569 | spin_unlock_irq(&mapping->tree_lock); |
1570 | unlock_page(page); |
1571 | putback_lru_page(page); |
1572 | spin_lock_irq(&mapping->tree_lock); |
1573 | slot = radix_tree_iter_next(&iter); |
1574 | } |
1575 | VM_BUG_ON(nr_none); |
1576 | spin_unlock_irq(&mapping->tree_lock); |
1577 | |
1578 | mem_cgroup_cancel_charge(new_page, memcg, true); |
1579 | new_page->mapping = NULL; |
1580 | } |
1581 | |
1582 | unlock_page(new_page); |
1583 | out: |
1584 | VM_BUG_ON(!list_empty(&pagelist)); |
1585 | /* TODO: tracepoints */ |
1586 | } |
1587 | |
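/*
 * Scan one huge-page-sized extent of the radix tree: count present
 * pages and swap entries, bailing out on compound, off-LRU or pinned
 * pages and on unsuitable nodes.  If enough pages are present, pick
 * the target node and try to collapse the extent.
 */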
1588 | static void khugepaged_scan_shmem(struct mm_struct *mm, |
1589 | struct address_space *mapping, |
1590 | pgoff_t start, struct page **hpage) |
1591 | { |
1592 | struct page *page = NULL; |
1593 | struct radix_tree_iter iter; |
1594 | void **slot; |
1595 | int present, swap; |
1596 | int node = NUMA_NO_NODE; |
1597 | int result = SCAN_SUCCEED; |
1598 | |
1599 | present = 0; |
1600 | swap = 0; |
1601 | memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); |
1602 | rcu_read_lock(); |
1603 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { |
1604 | if (iter.index >= start + HPAGE_PMD_NR) |
1605 | break; |
1606 | |
1607 | page = radix_tree_deref_slot(slot); |
1608 | if (radix_tree_deref_retry(page)) { |
1609 | slot = radix_tree_iter_retry(&iter); |
1610 | continue; |
1611 | } |
1612 | |
1613 | if (radix_tree_exception(page)) { |
1614 | if (++swap > khugepaged_max_ptes_swap) { |
1615 | result = SCAN_EXCEED_SWAP_PTE; |
1616 | break; |
1617 | } |
1618 | continue; |
1619 | } |
1620 | |
1621 | if (PageTransCompound(page)) { |
1622 | result = SCAN_PAGE_COMPOUND; |
1623 | break; |
1624 | } |
1625 | |
1626 | node = page_to_nid(page); |
1627 | if (khugepaged_scan_abort(node)) { |
1628 | result = SCAN_SCAN_ABORT; |
1629 | break; |
1630 | } |
1631 | khugepaged_node_load[node]++; |
1632 | |
1633 | if (!PageLRU(page)) { |
1634 | result = SCAN_PAGE_LRU; |
1635 | break; |
1636 | } |
1637 | |
1638 | if (page_count(page) != 1 + page_mapcount(page)) { |
1639 | result = SCAN_PAGE_COUNT; |
1640 | break; |
1641 | } |
1642 | |
1643 | /* |
1644 | * We probably should check if the page is referenced here, but |
1645 | * nobody would transfer pte_young() to PageReferenced() for us. |
1646 | * And rmap walk here is just too costly... |
1647 | */ |
1648 | |
1649 | present++; |
1650 | |
1651 | if (need_resched()) { |
1652 | cond_resched_rcu(); |
1653 | slot = radix_tree_iter_next(&iter); |
1654 | } |
1655 | } |
1656 | rcu_read_unlock(); |
1657 | |
1658 | if (result == SCAN_SUCCEED) { |
1659 | if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { |
1660 | result = SCAN_EXCEED_NONE_PTE; |
1661 | } else { |
1662 | node = khugepaged_find_target_node(); |
1663 | collapse_shmem(mm, mapping, start, hpage, node); |
1664 | } |
1665 | } |
1666 | |
1667 | /* TODO: tracepoints */ |
1668 | } |
1669 | #else |
1670 | static void khugepaged_scan_shmem(struct mm_struct *mm, |
1671 | struct address_space *mapping, |
1672 | pgoff_t start, struct page **hpage) |
1673 | { |
1674 | BUILD_BUG(); |
1675 | } |
1676 | #endif |
1677 | |
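/*
 * Scan up to 'pages' ptes in the vmas of the current mm slot,
 * starting from the saved cursor, and advance the cursor (moving on
 * to the next mm when this one is exhausted or exiting).  Called and
 * returns with khugepaged_mm_lock held, but drops it while scanning.
 * Returns the amount of progress made.
 */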
1678 | static unsigned int khugepaged_scan_mm_slot(unsigned int pages, |
1679 | struct page **hpage) |
1680 | __releases(&khugepaged_mm_lock) |
1681 | __acquires(&khugepaged_mm_lock) |
1682 | { |
1683 | struct mm_slot *mm_slot; |
1684 | struct mm_struct *mm; |
1685 | struct vm_area_struct *vma; |
1686 | int progress = 0; |
1687 | |
1688 | VM_BUG_ON(!pages); |
1689 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); |
1690 | |
1691 | if (khugepaged_scan.mm_slot) |
1692 | mm_slot = khugepaged_scan.mm_slot; |
1693 | else { |
1694 | mm_slot = list_entry(khugepaged_scan.mm_head.next, |
1695 | struct mm_slot, mm_node); |
1696 | khugepaged_scan.address = 0; |
1697 | khugepaged_scan.mm_slot = mm_slot; |
1698 | } |
1699 | spin_unlock(&khugepaged_mm_lock); |
1700 | |
1701 | mm = mm_slot->mm; |
1702 | /* |
1703 | * Don't wait for semaphore (to avoid long wait times). Just move to |
1704 | * the next mm on the list. |
1705 | */ |
1706 | vma = NULL; |
1707 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) |
1708 | goto breakouterloop_mmap_sem; |
1709 | if (likely(!khugepaged_test_exit(mm))) |
1710 | vma = find_vma(mm, khugepaged_scan.address); |
1711 | |
1712 | progress++; |
1713 | for (; vma; vma = vma->vm_next) { |
1714 | unsigned long hstart, hend; |
1715 | |
1716 | cond_resched(); |
1717 | if (unlikely(khugepaged_test_exit(mm))) { |
1718 | progress++; |
1719 | break; |
1720 | } |
1721 | if (!hugepage_vma_check(vma)) { |
1722 | skip: |
1723 | progress++; |
1724 | continue; |
1725 | } |
1726 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
1727 | hend = vma->vm_end & HPAGE_PMD_MASK; |
1728 | if (hstart >= hend) |
1729 | goto skip; |
1730 | if (khugepaged_scan.address > hend) |
1731 | goto skip; |
1732 | if (khugepaged_scan.address < hstart) |
1733 | khugepaged_scan.address = hstart; |
1734 | VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); |
1735 | |
1736 | while (khugepaged_scan.address < hend) { |
1737 | int ret; |
1738 | cond_resched(); |
1739 | if (unlikely(khugepaged_test_exit(mm))) |
1740 | goto breakouterloop; |
1741 | |
1742 | VM_BUG_ON(khugepaged_scan.address < hstart || |
1743 | khugepaged_scan.address + HPAGE_PMD_SIZE > |
1744 | hend); |
1745 | if (shmem_file(vma->vm_file)) { |
1746 | struct file *file; |
1747 | pgoff_t pgoff = linear_page_index(vma, |
1748 | khugepaged_scan.address); |
1749 | if (!shmem_huge_enabled(vma)) |
1750 | goto skip; |
1751 | file = get_file(vma->vm_file); |
1752 | up_read(&mm->mmap_sem); |
1753 | ret = 1; |
1754 | khugepaged_scan_shmem(mm, file->f_mapping, |
1755 | pgoff, hpage); |
1756 | fput(file); |
1757 | } else { |
1758 | ret = khugepaged_scan_pmd(mm, vma, |
1759 | khugepaged_scan.address, |
1760 | hpage); |
1761 | } |
1762 | /* move to next address */ |
1763 | khugepaged_scan.address += HPAGE_PMD_SIZE; |
1764 | progress += HPAGE_PMD_NR; |
1765 | if (ret) |
1766 | /* we released mmap_sem so break loop */ |
1767 | goto breakouterloop_mmap_sem; |
1768 | if (progress >= pages) |
1769 | goto breakouterloop; |
1770 | } |
1771 | } |
1772 | breakouterloop: |
1773 | up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ |
1774 | breakouterloop_mmap_sem: |
1775 | |
1776 | spin_lock(&khugepaged_mm_lock); |
1777 | VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); |
1778 | /* |
1779 | * Release the current mm_slot if this mm is about to die, or |
1780 | * if we scanned all vmas of this mm. |
1781 | */ |
1782 | if (khugepaged_test_exit(mm) || !vma) { |
1783 | /* |
1784 | * Make sure that if mm_users is reaching zero while |
1785 | * khugepaged runs here, khugepaged_exit will find |
1786 | * mm_slot not pointing to the exiting mm. |
1787 | */ |
1788 | if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { |
1789 | khugepaged_scan.mm_slot = list_entry( |
1790 | mm_slot->mm_node.next, |
1791 | struct mm_slot, mm_node); |
1792 | khugepaged_scan.address = 0; |
1793 | } else { |
1794 | khugepaged_scan.mm_slot = NULL; |
1795 | khugepaged_full_scans++; |
1796 | } |
1797 | |
1798 | collect_mm_slot(mm_slot); |
1799 | } |
1800 | |
1801 | return progress; |
1802 | } |
1803 | |
1804 | static int khugepaged_has_work(void) |
1805 | { |
1806 | return !list_empty(&khugepaged_scan.mm_head) && |
1807 | khugepaged_enabled(); |
1808 | } |
1809 | |
1810 | static int khugepaged_wait_event(void) |
1811 | { |
1812 | return !list_empty(&khugepaged_scan.mm_head) || |
1813 | kthread_should_stop(); |
1814 | } |
1815 | |
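/*
 * One scan pass: repeatedly call khugepaged_scan_mm_slot() until
 * khugepaged_pages_to_scan pages have been scanned or the mm list has
 * been traversed (at most two passes through its head), preallocating
 * a huge page first on !NUMA configurations.
 */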
1816 | static void khugepaged_do_scan(void) |
1817 | { |
1818 | struct page *hpage = NULL; |
1819 | unsigned int progress = 0, pass_through_head = 0; |
1820 | unsigned int pages = khugepaged_pages_to_scan; |
1821 | bool wait = true; |
1822 | |
1823 | barrier(); /* write khugepaged_pages_to_scan to local stack */ |
1824 | |
1825 | while (progress < pages) { |
1826 | if (!khugepaged_prealloc_page(&hpage, &wait)) |
1827 | break; |
1828 | |
1829 | cond_resched(); |
1830 | |
1831 | if (unlikely(kthread_should_stop() || try_to_freeze())) |
1832 | break; |
1833 | |
1834 | spin_lock(&khugepaged_mm_lock); |
1835 | if (!khugepaged_scan.mm_slot) |
1836 | pass_through_head++; |
1837 | if (khugepaged_has_work() && |
1838 | pass_through_head < 2) |
1839 | progress += khugepaged_scan_mm_slot(pages - progress, |
1840 | &hpage); |
1841 | else |
1842 | progress = pages; |
1843 | spin_unlock(&khugepaged_mm_lock); |
1844 | } |
1845 | |
1846 | if (!IS_ERR_OR_NULL(hpage)) |
1847 | put_page(hpage); |
1848 | } |
1849 | |
1850 | static bool khugepaged_should_wakeup(void) |
1851 | { |
1852 | return kthread_should_stop() || |
1853 | time_after_eq(jiffies, khugepaged_sleep_expire); |
1854 | } |
1855 | |
1856 | static void khugepaged_wait_work(void) |
1857 | { |
1858 | if (khugepaged_has_work()) { |
1859 | const unsigned long scan_sleep_jiffies = |
1860 | msecs_to_jiffies(khugepaged_scan_sleep_millisecs); |
1861 | |
1862 | if (!scan_sleep_jiffies) |
1863 | return; |
1864 | |
1865 | khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; |
1866 | wait_event_freezable_timeout(khugepaged_wait, |
1867 | khugepaged_should_wakeup(), |
1868 | scan_sleep_jiffies); |
1869 | return; |
1870 | } |
1871 | |
1872 | if (khugepaged_enabled()) |
1873 | wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); |
1874 | } |
1875 | |
1876 | static int khugepaged(void *none) |
1877 | { |
1878 | struct mm_slot *mm_slot; |
1879 | |
1880 | set_freezable(); |
1881 | set_user_nice(current, MAX_NICE); |
1882 | |
1883 | while (!kthread_should_stop()) { |
1884 | khugepaged_do_scan(); |
1885 | khugepaged_wait_work(); |
1886 | } |
1887 | |
1888 | spin_lock(&khugepaged_mm_lock); |
1889 | mm_slot = khugepaged_scan.mm_slot; |
1890 | khugepaged_scan.mm_slot = NULL; |
1891 | if (mm_slot) |
1892 | collect_mm_slot(mm_slot); |
1893 | spin_unlock(&khugepaged_mm_lock); |
1894 | return 0; |
1895 | } |
1896 | |
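/*
 * Raise min_free_kbytes so that enough pageblocks stay free, or
 * almost free, of other migratetypes for hugepage allocation and
 * anti-fragmentation to keep working, capped at 5% of lowmem.
 */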
1897 | static void set_recommended_min_free_kbytes(void) |
1898 | { |
1899 | struct zone *zone; |
1900 | int nr_zones = 0; |
1901 | unsigned long recommended_min; |
1902 | |
1903 | for_each_populated_zone(zone) |
1904 | nr_zones++; |
1905 | |
1906 | /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ |
1907 | recommended_min = pageblock_nr_pages * nr_zones * 2; |
1908 | |
1909 | /* |
1910 | * Make sure that on average at least two pageblocks are almost free |
1911 | * of another type, one for a migratetype to fall back to and a |
1912 | * second to avoid subsequent fallbacks of other types. There are 3 |
1913 | * MIGRATE_TYPES we care about. |
1914 | */ |
1915 | recommended_min += pageblock_nr_pages * nr_zones * |
1916 | MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; |
1917 | |
1918 | /* never allow reserving more than 5% of lowmem */ |
1919 | recommended_min = min(recommended_min, |
1920 | (unsigned long) nr_free_buffer_pages() / 20); |
1921 | recommended_min <<= (PAGE_SHIFT-10); |
1922 | |
1923 | if (recommended_min > min_free_kbytes) { |
1924 | if (user_min_free_kbytes >= 0) |
1925 | pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", |
1926 | min_free_kbytes, recommended_min); |
1927 | |
1928 | min_free_kbytes = recommended_min; |
1929 | } |
1930 | setup_per_zone_wmarks(); |
1931 | } |
1932 | |
1933 | int start_stop_khugepaged(void) |
1934 | { |
1935 | static struct task_struct *khugepaged_thread __read_mostly; |
1936 | static DEFINE_MUTEX(khugepaged_mutex); |
1937 | int err = 0; |
1938 | |
1939 | mutex_lock(&khugepaged_mutex); |
1940 | if (khugepaged_enabled()) { |
1941 | if (!khugepaged_thread) |
1942 | khugepaged_thread = kthread_run(khugepaged, NULL, |
1943 | "khugepaged"); |
1944 | if (IS_ERR(khugepaged_thread)) { |
1945 | pr_err("khugepaged: kthread_run(khugepaged) failed\n"); |
1946 | err = PTR_ERR(khugepaged_thread); |
1947 | khugepaged_thread = NULL; |
1948 | goto fail; |
1949 | } |
1950 | |
1951 | if (!list_empty(&khugepaged_scan.mm_head)) |
1952 | wake_up_interruptible(&khugepaged_wait); |
1953 | |
1954 | set_recommended_min_free_kbytes(); |
1955 | } else if (khugepaged_thread) { |
1956 | kthread_stop(khugepaged_thread); |
1957 | khugepaged_thread = NULL; |
1958 | } |
1959 | fail: |
1960 | mutex_unlock(&khugepaged_mutex); |
1961 | return err; |
1962 | } |
1963 |