1 | /* |
2 | * Memory merging support. |
3 | * |
4 | * This code enables dynamic sharing of identical pages found in different |
5 | * memory areas, even if they are not shared by fork() |
6 | * |
7 | * Copyright (C) 2008-2009 Red Hat, Inc. |
8 | * Authors: |
9 | * Izik Eidus |
10 | * Andrea Arcangeli |
11 | * Chris Wright |
12 | * Hugh Dickins |
13 | * |
14 | * This work is licensed under the terms of the GNU GPL, version 2. |
15 | */ |
16 | |
17 | #include <linux/errno.h> |
18 | #include <linux/mm.h> |
19 | #include <linux/fs.h> |
20 | #include <linux/mman.h> |
21 | #include <linux/sched.h> |
22 | #include <linux/rwsem.h> |
23 | #include <linux/pagemap.h> |
24 | #include <linux/rmap.h> |
25 | #include <linux/spinlock.h> |
26 | #include <linux/jhash.h> |
27 | #include <linux/delay.h> |
28 | #include <linux/kthread.h> |
29 | #include <linux/wait.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/rbtree.h> |
32 | #include <linux/memory.h> |
33 | #include <linux/mmu_notifier.h> |
34 | #include <linux/swap.h> |
35 | #include <linux/ksm.h> |
36 | #include <linux/hashtable.h> |
37 | #include <linux/freezer.h> |
38 | #include <linux/oom.h> |
39 | #include <linux/numa.h> |
40 | |
41 | #include <asm/tlbflush.h> |
42 | #include "internal.h" |
43 | |
44 | #ifdef CONFIG_NUMA |
45 | #define NUMA(x) (x) |
46 | #define DO_NUMA(x) do { (x); } while (0) |
47 | #else |
48 | #define NUMA(x) (0) |
49 | #define DO_NUMA(x) do { } while (0) |
50 | #endif |
51 | |
52 | /* |
53 | * A few notes about the KSM scanning process, |
54 | * to make it easier to understand the data structures below: |
55 | * |
56 | * In order to reduce excessive scanning, KSM sorts the memory pages by their |
57 | * contents into a data structure that holds pointers to the pages' locations. |
58 | * |
59 | * Since the contents of the pages may change at any moment, KSM cannot just |
60 | * insert the pages into a normal sorted tree and expect it to find anything. |
61 | * Therefore KSM uses two data structures - the stable and the unstable tree. |
62 | * |
63 | * The stable tree holds pointers to all the merged pages (ksm pages), sorted |
64 | * by their contents. Because each such page is write-protected, searching |
65 | * this tree is always reliable (except when pages are being unmapped), |
66 | * and therefore this tree is called the stable tree. |
67 | * |
68 | * In addition to the stable tree, KSM uses a second data structure called the |
69 | * unstable tree: this tree holds pointers to pages which have been found to |
70 | * be "unchanged for a period of time". The unstable tree sorts these pages |
71 | * by their contents, but since they are not write-protected, KSM cannot rely |
72 | * upon the unstable tree to work correctly - the unstable tree is liable to |
73 | * be corrupted as its contents are modified, and so it is called unstable. |
74 | * |
75 | * KSM solves this problem by several techniques: |
76 | * |
77 | * 1) The unstable tree is flushed every time KSM completes scanning all |
78 | * memory areas, and then the tree is rebuilt again from the beginning. |
79 | * 2) KSM will only insert pages into the unstable tree whose hash value |
80 | * has not changed since the previous scan of all memory areas. |
81 | * 3) The unstable tree is a red-black tree - so its balancing is based on the |
82 | * colors of the nodes and not on their contents, assuring that even when |
83 | * the tree gets "corrupted" it won't get out of balance, so scanning time |
84 | * remains the same (also, searching and inserting nodes in an rbtree uses |
85 | * the same algorithm, so we have no overhead when we flush and rebuild). |
86 | * 4) KSM never flushes the stable tree, which means that even if it were to |
87 | * take 10 attempts to find a page in the unstable tree, once it is found, |
88 | * it is secured in the stable tree. (When we scan a new page, we first |
89 | * compare it against the stable tree, and then against the unstable tree.) |
90 | * |
91 | * If the merge_across_nodes tunable is unset, then KSM maintains multiple |
92 | * stable trees and multiple unstable trees: one of each for each NUMA node. |
93 | */ |
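/*
 * For illustration only (not part of this file's logic): an application
 * opts an anonymous region into KSM scanning with madvise(2), roughly:
 *
 *	addr = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (addr != MAP_FAILED)
 *		madvise(addr, length, MADV_MERGEABLE);
 *
 * That sets VM_MERGEABLE on the vma, which is what ksmd scans below;
 * MADV_UNMERGEABLE undoes it via the unmerge path.
 */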
94 | |
95 | /** |
96 | * struct mm_slot - ksm information per mm that is being scanned |
97 | * @link: link to the mm_slots hash list |
98 | * @mm_list: link into the mm_slots list, rooted in ksm_mm_head |
99 | * @rmap_list: head for this mm_slot's singly-linked list of rmap_items |
100 | * @mm: the mm that this information is valid for |
101 | */ |
102 | struct mm_slot { |
103 | struct hlist_node link; |
104 | struct list_head mm_list; |
105 | struct rmap_item *rmap_list; |
106 | struct mm_struct *mm; |
107 | }; |
108 | |
109 | /** |
110 | * struct ksm_scan - cursor for scanning |
111 | * @mm_slot: the current mm_slot we are scanning |
112 | * @address: the next address inside that mm_slot to be scanned |
113 | * @rmap_list: link to the next rmap to be scanned in the rmap_list |
114 | * @seqnr: count of completed full scans (needed when removing unstable node) |
115 | * |
116 | * There is only the one ksm_scan instance of this cursor structure. |
117 | */ |
118 | struct ksm_scan { |
119 | struct mm_slot *mm_slot; |
120 | unsigned long address; |
121 | struct rmap_item **rmap_list; |
122 | unsigned long seqnr; |
123 | }; |
124 | |
125 | /** |
126 | * struct stable_node - node of the stable rbtree |
127 | * @node: rb node of this ksm page in the stable tree |
128 | * @head: (overlaying parent) set to &migrate_nodes while temporarily on that list |
129 | * @list: linked into migrate_nodes, pending placement in the proper node tree |
130 | * @hlist: hlist head of rmap_items using this ksm page |
131 | * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid) |
132 | * @nid: NUMA node id of stable tree in which linked (may not match kpfn) |
133 | */ |
134 | struct stable_node { |
135 | union { |
136 | struct rb_node node; /* when node of stable tree */ |
137 | struct { /* when listed for migration */ |
138 | struct list_head *head; |
139 | struct list_head list; |
140 | }; |
141 | }; |
142 | struct hlist_head hlist; |
143 | unsigned long kpfn; |
144 | #ifdef CONFIG_NUMA |
145 | int nid; |
146 | #endif |
147 | }; |
148 | |
149 | /** |
150 | * struct rmap_item - reverse mapping item for virtual addresses |
151 | * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list |
152 | * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree |
153 | * @nid: NUMA node id of unstable tree in which linked (may not match page) |
154 | * @mm: the memory structure this rmap_item is pointing into |
155 | * @address: the virtual address this rmap_item tracks (+ flags in low bits) |
156 | * @oldchecksum: previous checksum of the page at that virtual address |
157 | * @node: rb node of this rmap_item in the unstable tree |
158 | * @head: pointer to stable_node heading this list in the stable tree |
159 | * @hlist: link into hlist of rmap_items hanging off that stable_node |
160 | */ |
161 | struct rmap_item { |
162 | struct rmap_item *rmap_list; |
163 | union { |
164 | struct anon_vma *anon_vma; /* when stable */ |
165 | #ifdef CONFIG_NUMA |
166 | int nid; /* when node of unstable tree */ |
167 | #endif |
168 | }; |
169 | struct mm_struct *mm; |
170 | unsigned long address; /* + low bits used for flags below */ |
171 | unsigned int oldchecksum; /* when unstable */ |
172 | union { |
173 | struct rb_node node; /* when node of unstable tree */ |
174 | struct { /* when listed from stable tree */ |
175 | struct stable_node *head; |
176 | struct hlist_node hlist; |
177 | }; |
178 | }; |
179 | }; |
180 | |
181 | #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ |
182 | #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ |
183 | #define STABLE_FLAG 0x200 /* is listed from the stable tree */ |
184 | |
185 | /* The stable and unstable tree heads */ |
186 | static struct rb_root one_stable_tree[1] = { RB_ROOT }; |
187 | static struct rb_root one_unstable_tree[1] = { RB_ROOT }; |
188 | static struct rb_root *root_stable_tree = one_stable_tree; |
189 | static struct rb_root *root_unstable_tree = one_unstable_tree; |
190 | |
191 | /* Recently migrated nodes of stable tree, pending proper placement */ |
192 | static LIST_HEAD(migrate_nodes); |
193 | |
194 | #define MM_SLOTS_HASH_BITS 10 |
195 | static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); |
196 | |
197 | static struct mm_slot ksm_mm_head = { |
198 | .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list), |
199 | }; |
200 | static struct ksm_scan ksm_scan = { |
201 | .mm_slot = &ksm_mm_head, |
202 | }; |
203 | |
204 | static struct kmem_cache *rmap_item_cache; |
205 | static struct kmem_cache *stable_node_cache; |
206 | static struct kmem_cache *mm_slot_cache; |
207 | |
208 | /* The number of nodes in the stable tree */ |
209 | static unsigned long ksm_pages_shared; |
210 | |
211 | /* The number of page slots additionally sharing those nodes */ |
212 | static unsigned long ksm_pages_sharing; |
213 | |
214 | /* The number of nodes in the unstable tree */ |
215 | static unsigned long ksm_pages_unshared; |
216 | |
217 | /* The number of rmap_items in use: to calculate pages_volatile */ |
218 | static unsigned long ksm_rmap_items; |
219 | |
220 | /* Number of pages ksmd should scan in one batch */ |
221 | static unsigned int ksm_thread_pages_to_scan = 100; |
222 | |
223 | /* Milliseconds ksmd should sleep between batches */ |
224 | static unsigned int ksm_thread_sleep_millisecs = 20; |
225 | |
226 | #ifdef CONFIG_NUMA |
227 | /* Zeroed when merging across nodes is not allowed */ |
228 | static unsigned int ksm_merge_across_nodes = 1; |
229 | static int ksm_nr_node_ids = 1; |
230 | #else |
231 | #define ksm_merge_across_nodes 1U |
232 | #define ksm_nr_node_ids 1 |
233 | #endif |
234 | |
235 | #define KSM_RUN_STOP 0 |
236 | #define KSM_RUN_MERGE 1 |
237 | #define KSM_RUN_UNMERGE 2 |
238 | #define KSM_RUN_OFFLINE 4 |
239 | static unsigned long ksm_run = KSM_RUN_STOP; |
240 | static void wait_while_offlining(void); |
241 | |
242 | static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); |
243 | static DEFINE_MUTEX(ksm_thread_mutex); |
244 | static DEFINE_SPINLOCK(ksm_mmlist_lock); |
245 | |
246 | #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ |
247 | sizeof(struct __struct), __alignof__(struct __struct),\ |
248 | (__flags), NULL) |
249 | |
250 | static int __init ksm_slab_init(void) |
251 | { |
252 | rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); |
253 | if (!rmap_item_cache) |
254 | goto out; |
255 | |
256 | stable_node_cache = KSM_KMEM_CACHE(stable_node, 0); |
257 | if (!stable_node_cache) |
258 | goto out_free1; |
259 | |
260 | mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); |
261 | if (!mm_slot_cache) |
262 | goto out_free2; |
263 | |
264 | return 0; |
265 | |
266 | out_free2: |
267 | kmem_cache_destroy(stable_node_cache); |
268 | out_free1: |
269 | kmem_cache_destroy(rmap_item_cache); |
270 | out: |
271 | return -ENOMEM; |
272 | } |
273 | |
274 | static void __init ksm_slab_free(void) |
275 | { |
276 | kmem_cache_destroy(mm_slot_cache); |
277 | kmem_cache_destroy(stable_node_cache); |
278 | kmem_cache_destroy(rmap_item_cache); |
279 | mm_slot_cache = NULL; |
280 | } |
281 | |
282 | static inline struct rmap_item *alloc_rmap_item(void) |
283 | { |
284 | struct rmap_item *rmap_item; |
285 | |
286 | rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | |
287 | __GFP_NORETRY | __GFP_NOWARN); |
288 | if (rmap_item) |
289 | ksm_rmap_items++; |
290 | return rmap_item; |
291 | } |
292 | |
293 | static inline void free_rmap_item(struct rmap_item *rmap_item) |
294 | { |
295 | ksm_rmap_items--; |
296 | rmap_item->mm = NULL; /* debug safety */ |
297 | kmem_cache_free(rmap_item_cache, rmap_item); |
298 | } |
299 | |
300 | static inline struct stable_node *alloc_stable_node(void) |
301 | { |
302 | /* |
303 | * The allocation can take too long with GFP_KERNEL when memory is under |
304 | * pressure, which may lead to hung task warnings. Adding __GFP_HIGH |
305 | * grants access to memory reserves, helping to avoid this problem. |
306 | */ |
307 | return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH); |
308 | } |
309 | |
310 | static inline void free_stable_node(struct stable_node *stable_node) |
311 | { |
312 | kmem_cache_free(stable_node_cache, stable_node); |
313 | } |
314 | |
315 | static inline struct mm_slot *alloc_mm_slot(void) |
316 | { |
317 | if (!mm_slot_cache) /* initialization failed */ |
318 | return NULL; |
319 | return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); |
320 | } |
321 | |
322 | static inline void free_mm_slot(struct mm_slot *mm_slot) |
323 | { |
324 | kmem_cache_free(mm_slot_cache, mm_slot); |
325 | } |
326 | |
327 | static struct mm_slot *get_mm_slot(struct mm_struct *mm) |
328 | { |
329 | struct mm_slot *slot; |
330 | |
331 | hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm) |
332 | if (slot->mm == mm) |
333 | return slot; |
334 | |
335 | return NULL; |
336 | } |
337 | |
338 | static void insert_to_mm_slots_hash(struct mm_struct *mm, |
339 | struct mm_slot *mm_slot) |
340 | { |
341 | mm_slot->mm = mm; |
342 | hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm); |
343 | } |
344 | |
345 | /* |
346 | * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's |
347 | * page tables after it has passed through ksm_exit() - which, if necessary, |
348 | * takes mmap_sem briefly to serialize against them. ksm_exit() does not set |
349 | * a special flag: they can just back out as soon as mm_users goes to zero. |
350 | * ksm_test_exit() is used throughout to make this test for exit: in some |
351 | * places for correctness, in some places just to avoid unnecessary work. |
352 | */ |
353 | static inline bool ksm_test_exit(struct mm_struct *mm) |
354 | { |
355 | return atomic_read(&mm->mm_users) == 0; |
356 | } |
357 | |
358 | /* |
359 | * We use break_ksm to break COW on a ksm page: it's a stripped down |
360 | * |
361 | * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1) |
362 | * put_page(page); |
363 | * |
364 | * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, |
365 | * in case the application has unmapped and remapped mm,addr meanwhile. |
366 | * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP |
367 | * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. |
368 | * |
369 | * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context |
370 | * of the process that owns 'vma'. We also do not want to enforce |
371 | * protection keys here anyway. |
372 | */ |
373 | static int break_ksm(struct vm_area_struct *vma, unsigned long addr) |
374 | { |
375 | struct page *page; |
376 | int ret = 0; |
377 | |
378 | do { |
379 | cond_resched(); |
380 | page = follow_page(vma, addr, |
381 | FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE); |
382 | if (IS_ERR_OR_NULL(page)) |
383 | break; |
384 | if (PageKsm(page)) |
385 | ret = handle_mm_fault(vma, addr, |
386 | FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); |
387 | else |
388 | ret = VM_FAULT_WRITE; |
389 | put_page(page); |
390 | } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); |
391 | /* |
392 | * We must loop because handle_mm_fault() may back out if there's |
393 | * any difficulty e.g. if pte accessed bit gets updated concurrently. |
394 | * |
395 | * VM_FAULT_WRITE is what we have been hoping for: it indicates that |
396 | * COW has been broken, even if the vma does not permit VM_WRITE; |
397 | * but note that a concurrent fault might break PageKsm for us. |
398 | * |
399 | * VM_FAULT_SIGBUS could occur if we race with truncation of the |
400 | * backing file, which also invalidates anonymous pages: that's |
401 | * okay, that truncation will have unmapped the PageKsm for us. |
402 | * |
403 | * VM_FAULT_OOM: at the time of writing (late July 2009), setting |
404 | * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the |
405 | * current task has TIF_MEMDIE set, and will be OOM killed on return |
406 | * to user; and ksmd, having no mm, would never be chosen for that. |
407 | * |
408 | * But if the mm is in a limited mem_cgroup, then the fault may fail |
409 | * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and |
410 | * even ksmd can fail in this way - though it's usually breaking ksm |
411 | * just to undo a merge it made a moment before, so unlikely to oom. |
412 | * |
413 | * That's a pity: we might therefore have more kernel pages allocated |
414 | * than we're counting as nodes in the stable tree; but ksm_do_scan |
415 | * will retry to break_cow on each pass, so should recover the page |
416 | * in due course. The important thing is to not let VM_MERGEABLE |
417 | * be cleared while any such pages might remain in the area. |
418 | */ |
419 | return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; |
420 | } |
421 | |
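/*
 * Return the VM_MERGEABLE vma covering addr in this mm, or NULL if the mm
 * is exiting, no vma covers addr, or the vma is not mergeable / has no
 * anon_vma yet.
 */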
422 | static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, |
423 | unsigned long addr) |
424 | { |
425 | struct vm_area_struct *vma; |
426 | if (ksm_test_exit(mm)) |
427 | return NULL; |
428 | vma = find_vma(mm, addr); |
429 | if (!vma || vma->vm_start > addr) |
430 | return NULL; |
431 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
432 | return NULL; |
433 | return vma; |
434 | } |
435 | |
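/*
 * Undo a merge at rmap_item's mm,address: drop the stable anon_vma
 * reference, then fault in a private copy so the ksm page is no longer
 * mapped there.
 */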
436 | static void break_cow(struct rmap_item *rmap_item) |
437 | { |
438 | struct mm_struct *mm = rmap_item->mm; |
439 | unsigned long addr = rmap_item->address; |
440 | struct vm_area_struct *vma; |
441 | |
442 | /* |
443 | * It is not an accident that whenever we want to break COW |
444 | * to undo, we also need to drop a reference to the anon_vma. |
445 | */ |
446 | put_anon_vma(rmap_item->anon_vma); |
447 | |
448 | down_read(&mm->mmap_sem); |
449 | vma = find_mergeable_vma(mm, addr); |
450 | if (vma) |
451 | break_ksm(vma, addr); |
452 | up_read(&mm->mmap_sem); |
453 | } |
454 | |
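/*
 * Return the anonymous page currently mapped at rmap_item's mm,address,
 * with a reference held (caller must put_page), or NULL if the vma is no
 * longer mergeable or the page there is not anonymous.
 */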
455 | static struct page *get_mergeable_page(struct rmap_item *rmap_item) |
456 | { |
457 | struct mm_struct *mm = rmap_item->mm; |
458 | unsigned long addr = rmap_item->address; |
459 | struct vm_area_struct *vma; |
460 | struct page *page; |
461 | |
462 | down_read(&mm->mmap_sem); |
463 | vma = find_mergeable_vma(mm, addr); |
464 | if (!vma) |
465 | goto out; |
466 | |
467 | page = follow_page(vma, addr, FOLL_GET); |
468 | if (IS_ERR_OR_NULL(page)) |
469 | goto out; |
470 | if (PageAnon(page)) { |
471 | flush_anon_page(vma, page, addr); |
472 | flush_dcache_page(page); |
473 | } else { |
474 | put_page(page); |
475 | out: |
476 | page = NULL; |
477 | } |
478 | up_read(&mm->mmap_sem); |
479 | return page; |
480 | } |
481 | |
482 | /* |
483 | * This helper is used for getting the right index into the array of tree roots. |
484 | * When merge_across_nodes knob is set to 1, there are only two rb-trees for |
485 | * stable and unstable pages from all nodes with roots in index 0. Otherwise, |
486 | * every node has its own stable and unstable tree. |
487 | */ |
488 | static inline int get_kpfn_nid(unsigned long kpfn) |
489 | { |
490 | return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn)); |
491 | } |
492 | |
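/*
 * Unlink a stable_node whose ksm page is gone: adjust the pages_shared /
 * pages_sharing counts for its remaining rmap_items, erase it from the
 * stable tree (or from the migrate_nodes list) and free it.
 */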
493 | static void remove_node_from_stable_tree(struct stable_node *stable_node) |
494 | { |
495 | struct rmap_item *rmap_item; |
496 | |
497 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
498 | if (rmap_item->hlist.next) |
499 | ksm_pages_sharing--; |
500 | else |
501 | ksm_pages_shared--; |
502 | put_anon_vma(rmap_item->anon_vma); |
503 | rmap_item->address &= PAGE_MASK; |
504 | cond_resched(); |
505 | } |
506 | |
507 | if (stable_node->head == &migrate_nodes) |
508 | list_del(&stable_node->list); |
509 | else |
510 | rb_erase(&stable_node->node, |
511 | root_stable_tree + NUMA(stable_node->nid)); |
512 | free_stable_node(stable_node); |
513 | } |
514 | |
515 | /* |
516 | * get_ksm_page: checks if the page indicated by the stable node |
517 | * is still its ksm page, despite having held no reference to it. |
518 | * In which case we can trust the content of the page, and it |
519 | * returns the gotten page; but if the page has now been zapped, |
520 | * remove the stale node from the stable tree and return NULL. |
521 | * But beware, the stable node's page might be being migrated. |
522 | * |
523 | * You would expect the stable_node to hold a reference to the ksm page. |
524 | * But if it increments the page's count, swapping out has to wait for |
525 | * ksmd to come around again before it can free the page, which may take |
526 | * seconds or even minutes: much too unresponsive. So instead we use a |
527 | * "keyhole reference": access to the ksm page from the stable node peeps |
528 | * out through its keyhole to see if that page still holds the right key, |
529 | * pointing back to this stable node. This relies on freeing a PageAnon |
530 | * page to reset its page->mapping to NULL, and relies on no other use of |
531 | * a page to put something that might look like our key in page->mapping. |
532 |  * A page on its way to being freed may still show our key; that is an anomaly to bear in mind. |
533 | */ |
534 | static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it) |
535 | { |
536 | struct page *page; |
537 | void *expected_mapping; |
538 | unsigned long kpfn; |
539 | |
540 | expected_mapping = (void *)((unsigned long)stable_node | |
541 | PAGE_MAPPING_KSM); |
542 | again: |
543 | kpfn = READ_ONCE(stable_node->kpfn); |
544 | page = pfn_to_page(kpfn); |
545 | |
546 | /* |
547 | * page is computed from kpfn, so on most architectures reading |
548 | * page->mapping is naturally ordered after reading node->kpfn, |
549 | * but on Alpha we need to be more careful. |
550 | */ |
551 | smp_read_barrier_depends(); |
552 | if (READ_ONCE(page->mapping) != expected_mapping) |
553 | goto stale; |
554 | |
555 | /* |
556 | * We cannot do anything with the page while its refcount is 0. |
557 | * Usually 0 means free, or tail of a higher-order page: in which |
558 | * case this node is no longer referenced, and should be freed; |
559 | * however, it might mean that the page is under page_freeze_refs(). |
560 | * The __remove_mapping() case is easy, again the node is now stale; |
561 | * but if page is swapcache in migrate_page_move_mapping(), it might |
562 | * still be our page, in which case it's essential to keep the node. |
563 | */ |
564 | while (!get_page_unless_zero(page)) { |
565 | /* |
566 | * Another check for page->mapping != expected_mapping would |
567 | * work here too. We have chosen the !PageSwapCache test to |
568 | * optimize the common case, when the page is or is about to |
569 | * be freed: PageSwapCache is cleared (under spin_lock_irq) |
570 | * in the freeze_refs section of __remove_mapping(); but Anon |
571 | * page->mapping reset to NULL later, in free_pages_prepare(). |
572 | */ |
573 | if (!PageSwapCache(page)) |
574 | goto stale; |
575 | cpu_relax(); |
576 | } |
577 | |
578 | if (READ_ONCE(page->mapping) != expected_mapping) { |
579 | put_page(page); |
580 | goto stale; |
581 | } |
582 | |
583 | if (lock_it) { |
584 | lock_page(page); |
585 | if (READ_ONCE(page->mapping) != expected_mapping) { |
586 | unlock_page(page); |
587 | put_page(page); |
588 | goto stale; |
589 | } |
590 | } |
591 | return page; |
592 | |
593 | stale: |
594 | /* |
595 | * We come here from above when page->mapping or !PageSwapCache |
596 | * suggests that the node is stale; but it might be under migration. |
597 | * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(), |
598 | * before checking whether node->kpfn has been changed. |
599 | */ |
600 | smp_rmb(); |
601 | if (READ_ONCE(stable_node->kpfn) != kpfn) |
602 | goto again; |
603 | remove_node_from_stable_tree(stable_node); |
604 | return NULL; |
605 | } |
606 | |
607 | /* |
608 | * Removing rmap_item from stable or unstable tree. |
609 | * This function will clean the information from the stable/unstable tree. |
610 | */ |
611 | static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) |
612 | { |
613 | if (rmap_item->address & STABLE_FLAG) { |
614 | struct stable_node *stable_node; |
615 | struct page *page; |
616 | |
617 | stable_node = rmap_item->head; |
618 | page = get_ksm_page(stable_node, true); |
619 | if (!page) |
620 | goto out; |
621 | |
622 | hlist_del(&rmap_item->hlist); |
623 | unlock_page(page); |
624 | put_page(page); |
625 | |
626 | if (!hlist_empty(&stable_node->hlist)) |
627 | ksm_pages_sharing--; |
628 | else |
629 | ksm_pages_shared--; |
630 | |
631 | put_anon_vma(rmap_item->anon_vma); |
632 | rmap_item->address &= PAGE_MASK; |
633 | |
634 | } else if (rmap_item->address & UNSTABLE_FLAG) { |
635 | unsigned char age; |
636 | /* |
637 | * Usually ksmd can and must skip the rb_erase, because |
638 | * root_unstable_tree was already reset to RB_ROOT. |
639 | * But be careful when an mm is exiting: do the rb_erase |
640 | * if this rmap_item was inserted by this scan, rather |
641 | * than left over from before. |
642 | */ |
643 | age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); |
644 | BUG_ON(age > 1); |
645 | if (!age) |
646 | rb_erase(&rmap_item->node, |
647 | root_unstable_tree + NUMA(rmap_item->nid)); |
648 | ksm_pages_unshared--; |
649 | rmap_item->address &= PAGE_MASK; |
650 | } |
651 | out: |
652 | cond_resched(); /* we're called from many long loops */ |
653 | } |
654 | |
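/*
 * Unlink and free every rmap_item from *rmap_list to the end of the list:
 * used when those addresses are no longer covered by any VM_MERGEABLE vma,
 * or when the whole mm is being cleaned up.
 */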
655 | static void remove_trailing_rmap_items(struct mm_slot *mm_slot, |
656 | struct rmap_item **rmap_list) |
657 | { |
658 | while (*rmap_list) { |
659 | struct rmap_item *rmap_item = *rmap_list; |
660 | *rmap_list = rmap_item->rmap_list; |
661 | remove_rmap_item_from_tree(rmap_item); |
662 | free_rmap_item(rmap_item); |
663 | } |
664 | } |
665 | |
666 | /* |
667 | * Though it's very tempting to unmerge rmap_items from stable tree rather |
668 | * than check every pte of a given vma, the locking doesn't quite work for |
669 | * that - an rmap_item is assigned to the stable tree after inserting ksm |
670 | * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing |
671 | * rmap_items from parent to child at fork time (so as not to waste time |
672 | * if exit comes before the next scan reaches it). |
673 | * |
674 | * Similarly, although we'd like to remove rmap_items (so updating counts |
675 | * and freeing memory) when unmerging an area, it's easier to leave that |
676 | * to the next pass of ksmd - consider, for example, how ksmd might be |
677 | * in cmp_and_merge_page on one of the rmap_items we would be removing. |
678 | */ |
679 | static int unmerge_ksm_pages(struct vm_area_struct *vma, |
680 | unsigned long start, unsigned long end) |
681 | { |
682 | unsigned long addr; |
683 | int err = 0; |
684 | |
685 | for (addr = start; addr < end && !err; addr += PAGE_SIZE) { |
686 | if (ksm_test_exit(vma->vm_mm)) |
687 | break; |
688 | if (signal_pending(current)) |
689 | err = -ERESTARTSYS; |
690 | else |
691 | err = break_ksm(vma, addr); |
692 | } |
693 | return err; |
694 | } |
695 | |
696 | #ifdef CONFIG_SYSFS |
697 | /* |
698 | * Only called through the sysfs control interface: |
699 | */ |
700 | static int remove_stable_node(struct stable_node *stable_node) |
701 | { |
702 | struct page *page; |
703 | int err; |
704 | |
705 | page = get_ksm_page(stable_node, true); |
706 | if (!page) { |
707 | /* |
708 | * get_ksm_page did remove_node_from_stable_tree itself. |
709 | */ |
710 | return 0; |
711 | } |
712 | |
713 | if (WARN_ON_ONCE(page_mapped(page))) { |
714 | /* |
715 | * This should not happen: but if it does, just refuse to let |
716 | * merge_across_nodes be switched - there is no need to panic. |
717 | */ |
718 | err = -EBUSY; |
719 | } else { |
720 | /* |
721 | * The stable node did not yet appear stale to get_ksm_page(), |
722 | * since that allows for an unmapped ksm page to be recognized |
723 | * right up until it is freed; but the node is safe to remove. |
724 | * This page might be in a pagevec waiting to be freed, |
725 | * or it might be PageSwapCache (perhaps under writeback), |
726 | * or it might have been removed from swapcache a moment ago. |
727 | */ |
728 | set_page_stable_node(page, NULL); |
729 | remove_node_from_stable_tree(stable_node); |
730 | err = 0; |
731 | } |
732 | |
733 | unlock_page(page); |
734 | put_page(page); |
735 | return err; |
736 | } |
737 | |
738 | static int remove_all_stable_nodes(void) |
739 | { |
740 | struct stable_node *stable_node, *next; |
741 | int nid; |
742 | int err = 0; |
743 | |
744 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
745 | while (root_stable_tree[nid].rb_node) { |
746 | stable_node = rb_entry(root_stable_tree[nid].rb_node, |
747 | struct stable_node, node); |
748 | if (remove_stable_node(stable_node)) { |
749 | err = -EBUSY; |
750 | break; /* proceed to next nid */ |
751 | } |
752 | cond_resched(); |
753 | } |
754 | } |
755 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
756 | if (remove_stable_node(stable_node)) |
757 | err = -EBUSY; |
758 | cond_resched(); |
759 | } |
760 | return err; |
761 | } |
762 | |
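/*
 * Break COW on every merged page in every registered mm's mergeable vmas,
 * then drop each mm's rmap_items; mm_slots whose mm has already exited are
 * freed here too.  Returns 0 on success, or the first error hit while
 * unmerging (e.g. -ERESTARTSYS on a pending signal).
 */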
763 | static int unmerge_and_remove_all_rmap_items(void) |
764 | { |
765 | struct mm_slot *mm_slot; |
766 | struct mm_struct *mm; |
767 | struct vm_area_struct *vma; |
768 | int err = 0; |
769 | |
770 | spin_lock(&ksm_mmlist_lock); |
771 | ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, |
772 | struct mm_slot, mm_list); |
773 | spin_unlock(&ksm_mmlist_lock); |
774 | |
775 | for (mm_slot = ksm_scan.mm_slot; |
776 | mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { |
777 | mm = mm_slot->mm; |
778 | down_read(&mm->mmap_sem); |
779 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
780 | if (ksm_test_exit(mm)) |
781 | break; |
782 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
783 | continue; |
784 | err = unmerge_ksm_pages(vma, |
785 | vma->vm_start, vma->vm_end); |
786 | if (err) |
787 | goto error; |
788 | } |
789 | |
790 | remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list); |
791 | up_read(&mm->mmap_sem); |
792 | |
793 | spin_lock(&ksm_mmlist_lock); |
794 | ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, |
795 | struct mm_slot, mm_list); |
796 | if (ksm_test_exit(mm)) { |
797 | hash_del(&mm_slot->link); |
798 | list_del(&mm_slot->mm_list); |
799 | spin_unlock(&ksm_mmlist_lock); |
800 | |
801 | free_mm_slot(mm_slot); |
802 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
803 | mmdrop(mm); |
804 | } else |
805 | spin_unlock(&ksm_mmlist_lock); |
806 | } |
807 | |
808 | /* Clean up stable nodes, but don't worry if some are still busy */ |
809 | remove_all_stable_nodes(); |
810 | ksm_scan.seqnr = 0; |
811 | return 0; |
812 | |
813 | error: |
814 | up_read(&mm->mmap_sem); |
815 | spin_lock(&ksm_mmlist_lock); |
816 | ksm_scan.mm_slot = &ksm_mm_head; |
817 | spin_unlock(&ksm_mmlist_lock); |
818 | return err; |
819 | } |
820 | #endif /* CONFIG_SYSFS */ |
821 | |
822 | static u32 calc_checksum(struct page *page) |
823 | { |
824 | u32 checksum; |
825 | void *addr = kmap_atomic(page); |
826 | checksum = jhash2(addr, PAGE_SIZE / 4, 17); |
827 | kunmap_atomic(addr); |
828 | return checksum; |
829 | } |
830 | |
831 | static int memcmp_pages(struct page *page1, struct page *page2) |
832 | { |
833 | char *addr1, *addr2; |
834 | int ret; |
835 | |
836 | addr1 = kmap_atomic(page1); |
837 | addr2 = kmap_atomic(page2); |
838 | ret = memcmp(addr1, addr2, PAGE_SIZE); |
839 | kunmap_atomic(addr2); |
840 | kunmap_atomic(addr1); |
841 | return ret; |
842 | } |
843 | |
844 | static inline int pages_identical(struct page *page1, struct page *page2) |
845 | { |
846 | return !memcmp_pages(page1, page2); |
847 | } |
848 | |
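/*
 * Make the pte mapping page in vma clean and write-protected, so that the
 * page contents cannot change while we merge.  The pte is cleared and the
 * TLB flushed before comparing page_count against page_mapcount, to close
 * the race with a concurrent O_DIRECT / get_user_pages_fast pin.  On
 * success *orig_pte records the resulting pte for replace_page()'s
 * pte_same() check; returns 0 on success, -EFAULT otherwise.
 */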
849 | static int write_protect_page(struct vm_area_struct *vma, struct page *page, |
850 | pte_t *orig_pte) |
851 | { |
852 | struct mm_struct *mm = vma->vm_mm; |
853 | unsigned long addr; |
854 | pte_t *ptep; |
855 | spinlock_t *ptl; |
856 | int swapped; |
857 | int err = -EFAULT; |
858 | unsigned long mmun_start; /* For mmu_notifiers */ |
859 | unsigned long mmun_end; /* For mmu_notifiers */ |
860 | |
861 | addr = page_address_in_vma(page, vma); |
862 | if (addr == -EFAULT) |
863 | goto out; |
864 | |
865 | BUG_ON(PageTransCompound(page)); |
866 | |
867 | mmun_start = addr; |
868 | mmun_end = addr + PAGE_SIZE; |
869 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
870 | |
871 | ptep = page_check_address(page, mm, addr, &ptl, 0); |
872 | if (!ptep) |
873 | goto out_mn; |
874 | |
875 | if (pte_write(*ptep) || pte_dirty(*ptep)) { |
876 | pte_t entry; |
877 | |
878 | swapped = PageSwapCache(page); |
879 | flush_cache_page(vma, addr, page_to_pfn(page)); |
880 | /* |
881 |  * OK, this is tricky: get_user_pages_fast() runs without taking |
882 |  * any lock, so the check we are about to make of the page count |
883 |  * against the map count is racy, and an O_DIRECT pin can appear |
884 |  * right after the check. |
885 |  * So we clear the pte and flush the TLB before the check; |
886 |  * this assures us that no O_DIRECT can start after the check |
887 |  * or in the middle of the check. |
888 | */ |
889 | entry = ptep_clear_flush_notify(vma, addr, ptep); |
890 | /* |
891 | * Check that no O_DIRECT or similar I/O is in progress on the |
892 | * page |
893 | */ |
894 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { |
895 | set_pte_at(mm, addr, ptep, entry); |
896 | goto out_unlock; |
897 | } |
898 | if (pte_dirty(entry)) |
899 | set_page_dirty(page); |
900 | entry = pte_mkclean(pte_wrprotect(entry)); |
901 | set_pte_at_notify(mm, addr, ptep, entry); |
902 | } |
903 | *orig_pte = *ptep; |
904 | err = 0; |
905 | |
906 | out_unlock: |
907 | pte_unmap_unlock(ptep, ptl); |
908 | out_mn: |
909 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
910 | out: |
911 | return err; |
912 | } |
913 | |
914 | /** |
915 | * replace_page - replace page in vma by new ksm page |
916 | * @vma: vma that holds the pte pointing to page |
917 | * @page: the page we are replacing by kpage |
918 | * @kpage: the ksm page we replace page by |
919 | * @orig_pte: the original value of the pte |
920 | * |
921 | * Returns 0 on success, -EFAULT on failure. |
922 | */ |
923 | static int replace_page(struct vm_area_struct *vma, struct page *page, |
924 | struct page *kpage, pte_t orig_pte) |
925 | { |
926 | struct mm_struct *mm = vma->vm_mm; |
927 | pmd_t *pmd; |
928 | pte_t *ptep; |
929 | spinlock_t *ptl; |
930 | unsigned long addr; |
931 | int err = -EFAULT; |
932 | unsigned long mmun_start; /* For mmu_notifiers */ |
933 | unsigned long mmun_end; /* For mmu_notifiers */ |
934 | |
935 | addr = page_address_in_vma(page, vma); |
936 | if (addr == -EFAULT) |
937 | goto out; |
938 | |
939 | pmd = mm_find_pmd(mm, addr); |
940 | if (!pmd) |
941 | goto out; |
942 | |
943 | mmun_start = addr; |
944 | mmun_end = addr + PAGE_SIZE; |
945 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
946 | |
947 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
948 | if (!pte_same(*ptep, orig_pte)) { |
949 | pte_unmap_unlock(ptep, ptl); |
950 | goto out_mn; |
951 | } |
952 | |
953 | get_page(kpage); |
954 | page_add_anon_rmap(kpage, vma, addr, false); |
955 | |
956 | flush_cache_page(vma, addr, pte_pfn(*ptep)); |
957 | ptep_clear_flush_notify(vma, addr, ptep); |
958 | set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); |
959 | |
960 | page_remove_rmap(page, false); |
961 | if (!page_mapped(page)) |
962 | try_to_free_swap(page); |
963 | put_page(page); |
964 | |
965 | pte_unmap_unlock(ptep, ptl); |
966 | err = 0; |
967 | out_mn: |
968 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
969 | out: |
970 | return err; |
971 | } |
972 | |
973 | /* |
974 | * try_to_merge_one_page - take two pages and merge them into one |
975 | * @vma: the vma that holds the pte pointing to page |
976 | * @page: the PageAnon page that we want to replace with kpage |
977 | * @kpage: the PageKsm page that we want to map instead of page, |
978 | * or NULL the first time when we want to use page as kpage. |
979 | * |
980 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
981 | */ |
982 | static int try_to_merge_one_page(struct vm_area_struct *vma, |
983 | struct page *page, struct page *kpage) |
984 | { |
985 | pte_t orig_pte = __pte(0); |
986 | int err = -EFAULT; |
987 | |
988 | if (page == kpage) /* ksm page forked */ |
989 | return 0; |
990 | |
991 | if (!PageAnon(page)) |
992 | goto out; |
993 | |
994 | /* |
995 | * We need the page lock to read a stable PageSwapCache in |
996 | * write_protect_page(). We use trylock_page() instead of |
997 | * lock_page() because we don't want to wait here - we |
998 | * prefer to continue scanning and merging different pages, |
999 | * then come back to this page when it is unlocked. |
1000 | */ |
1001 | if (!trylock_page(page)) |
1002 | goto out; |
1003 | |
1004 | if (PageTransCompound(page)) { |
1005 | if (split_huge_page(page)) |
1006 | goto out_unlock; |
1007 | } |
1008 | |
1009 | /* |
1010 | * If this anonymous page is mapped only here, its pte may need |
1011 | * to be write-protected. If it's mapped elsewhere, all of its |
1012 | * ptes are necessarily already write-protected. But in either |
1013 | * case, we need to lock and check page_count is not raised. |
1014 | */ |
1015 | if (write_protect_page(vma, page, &orig_pte) == 0) { |
1016 | if (!kpage) { |
1017 | /* |
1018 | * While we hold page lock, upgrade page from |
1019 | * PageAnon+anon_vma to PageKsm+NULL stable_node: |
1020 | * stable_tree_insert() will update stable_node. |
1021 | */ |
1022 | set_page_stable_node(page, NULL); |
1023 | mark_page_accessed(page); |
1024 | /* |
1025 | * Page reclaim just frees a clean page with no dirty |
1026 | * ptes: make sure that the ksm page would be swapped. |
1027 | */ |
1028 | if (!PageDirty(page)) |
1029 | SetPageDirty(page); |
1030 | err = 0; |
1031 | } else if (pages_identical(page, kpage)) |
1032 | err = replace_page(vma, page, kpage, orig_pte); |
1033 | } |
1034 | |
1035 | if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { |
1036 | munlock_vma_page(page); |
1037 | if (!PageMlocked(kpage)) { |
1038 | unlock_page(page); |
1039 | lock_page(kpage); |
1040 | mlock_vma_page(kpage); |
1041 | page = kpage; /* for final unlock */ |
1042 | } |
1043 | } |
1044 | |
1045 | out_unlock: |
1046 | unlock_page(page); |
1047 | out: |
1048 | return err; |
1049 | } |
1050 | |
1051 | /* |
1052 | * try_to_merge_with_ksm_page - like try_to_merge_two_pages, |
1053 | * but no new kernel page is allocated: kpage must already be a ksm page. |
1054 | * |
1055 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
1056 | */ |
1057 | static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, |
1058 | struct page *page, struct page *kpage) |
1059 | { |
1060 | struct mm_struct *mm = rmap_item->mm; |
1061 | struct vm_area_struct *vma; |
1062 | int err = -EFAULT; |
1063 | |
1064 | down_read(&mm->mmap_sem); |
1065 | vma = find_mergeable_vma(mm, rmap_item->address); |
1066 | if (!vma) |
1067 | goto out; |
1068 | |
1069 | err = try_to_merge_one_page(vma, page, kpage); |
1070 | if (err) |
1071 | goto out; |
1072 | |
1073 | /* Unstable nid is in union with stable anon_vma: remove first */ |
1074 | remove_rmap_item_from_tree(rmap_item); |
1075 | |
1076 | /* Must get reference to anon_vma while still holding mmap_sem */ |
1077 | rmap_item->anon_vma = vma->anon_vma; |
1078 | get_anon_vma(vma->anon_vma); |
1079 | out: |
1080 | up_read(&mm->mmap_sem); |
1081 | return err; |
1082 | } |
1083 | |
1084 | /* |
1085 | * try_to_merge_two_pages - take two identical pages and prepare them |
1086 | * to be merged into one page. |
1087 | * |
1088 | * This function returns the kpage if we successfully merged two identical |
1089 | * pages into one ksm page, NULL otherwise. |
1090 | * |
1091 | * Note that this function upgrades page to ksm page: if one of the pages |
1092 | * is already a ksm page, try_to_merge_with_ksm_page should be used. |
1093 | */ |
1094 | static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, |
1095 | struct page *page, |
1096 | struct rmap_item *tree_rmap_item, |
1097 | struct page *tree_page) |
1098 | { |
1099 | int err; |
1100 | |
1101 | err = try_to_merge_with_ksm_page(rmap_item, page, NULL); |
1102 | if (!err) { |
1103 | err = try_to_merge_with_ksm_page(tree_rmap_item, |
1104 | tree_page, page); |
1105 | /* |
1106 | * If that fails, we have a ksm page with only one pte |
1107 | * pointing to it: so break it. |
1108 | */ |
1109 | if (err) |
1110 | break_cow(rmap_item); |
1111 | } |
1112 | return err ? NULL : page; |
1113 | } |
1114 | |
1115 | /* |
1116 | * stable_tree_search - search for page inside the stable tree |
1117 | * |
1118 | * This function checks if there is a page inside the stable tree |
1119 | * with identical content to the page that we are scanning right now. |
1120 | * |
1121 |  * This function returns the ksm page of identical content if found |
1122 |  * (with a reference taken), NULL otherwise. |
1123 | */ |
1124 | static struct page *stable_tree_search(struct page *page) |
1125 | { |
1126 | int nid; |
1127 | struct rb_root *root; |
1128 | struct rb_node **new; |
1129 | struct rb_node *parent; |
1130 | struct stable_node *stable_node; |
1131 | struct stable_node *page_node; |
1132 | |
1133 | page_node = page_stable_node(page); |
1134 | if (page_node && page_node->head != &migrate_nodes) { |
1135 | /* ksm page forked */ |
1136 | get_page(page); |
1137 | return page; |
1138 | } |
1139 | |
1140 | nid = get_kpfn_nid(page_to_pfn(page)); |
1141 | root = root_stable_tree + nid; |
1142 | again: |
1143 | new = &root->rb_node; |
1144 | parent = NULL; |
1145 | |
1146 | while (*new) { |
1147 | struct page *tree_page; |
1148 | int ret; |
1149 | |
1150 | cond_resched(); |
1151 | stable_node = rb_entry(*new, struct stable_node, node); |
1152 | tree_page = get_ksm_page(stable_node, false); |
1153 | if (!tree_page) { |
1154 | /* |
1155 | * If we walked over a stale stable_node, |
1156 | * get_ksm_page() will call rb_erase() and it |
1157 | * may rebalance the tree from under us. So |
1158 | * restart the search from scratch. Returning |
1159 | * NULL would be safe too, but we'd generate |
1160 | * false negative insertions just because some |
1161 | * stable_node was stale. |
1162 | */ |
1163 | goto again; |
1164 | } |
1165 | |
1166 | ret = memcmp_pages(page, tree_page); |
1167 | put_page(tree_page); |
1168 | |
1169 | parent = *new; |
1170 | if (ret < 0) |
1171 | new = &parent->rb_left; |
1172 | else if (ret > 0) |
1173 | new = &parent->rb_right; |
1174 | else { |
1175 | /* |
1176 | * Lock and unlock the stable_node's page (which |
1177 | * might already have been migrated) so that page |
1178 | * migration is sure to notice its raised count. |
1179 | * It would be more elegant to return stable_node |
1180 | * than kpage, but that involves more changes. |
1181 | */ |
1182 | tree_page = get_ksm_page(stable_node, true); |
1183 | if (tree_page) { |
1184 | unlock_page(tree_page); |
1185 | if (get_kpfn_nid(stable_node->kpfn) != |
1186 | NUMA(stable_node->nid)) { |
1187 | put_page(tree_page); |
1188 | goto replace; |
1189 | } |
1190 | return tree_page; |
1191 | } |
1192 | /* |
1193 | * There is now a place for page_node, but the tree may |
1194 | * have been rebalanced, so re-evaluate parent and new. |
1195 | */ |
1196 | if (page_node) |
1197 | goto again; |
1198 | return NULL; |
1199 | } |
1200 | } |
1201 | |
1202 | if (!page_node) |
1203 | return NULL; |
1204 | |
1205 | list_del(&page_node->list); |
1206 | DO_NUMA(page_node->nid = nid); |
1207 | rb_link_node(&page_node->node, parent, new); |
1208 | rb_insert_color(&page_node->node, root); |
1209 | get_page(page); |
1210 | return page; |
1211 | |
1212 | replace: |
1213 | if (page_node) { |
1214 | list_del(&page_node->list); |
1215 | DO_NUMA(page_node->nid = nid); |
1216 | rb_replace_node(&stable_node->node, &page_node->node, root); |
1217 | get_page(page); |
1218 | } else { |
1219 | rb_erase(&stable_node->node, root); |
1220 | page = NULL; |
1221 | } |
1222 | stable_node->head = &migrate_nodes; |
1223 | list_add(&stable_node->list, stable_node->head); |
1224 | return page; |
1225 | } |
1226 | |
1227 | /* |
1228 | * stable_tree_insert - insert stable tree node pointing to new ksm page |
1229 | * into the stable tree. |
1230 | * |
1231 | * This function returns the stable tree node just allocated on success, |
1232 | * NULL otherwise. |
1233 | */ |
1234 | static struct stable_node *stable_tree_insert(struct page *kpage) |
1235 | { |
1236 | int nid; |
1237 | unsigned long kpfn; |
1238 | struct rb_root *root; |
1239 | struct rb_node **new; |
1240 | struct rb_node *parent; |
1241 | struct stable_node *stable_node; |
1242 | |
1243 | kpfn = page_to_pfn(kpage); |
1244 | nid = get_kpfn_nid(kpfn); |
1245 | root = root_stable_tree + nid; |
1246 | again: |
1247 | parent = NULL; |
1248 | new = &root->rb_node; |
1249 | |
1250 | while (*new) { |
1251 | struct page *tree_page; |
1252 | int ret; |
1253 | |
1254 | cond_resched(); |
1255 | stable_node = rb_entry(*new, struct stable_node, node); |
1256 | tree_page = get_ksm_page(stable_node, false); |
1257 | if (!tree_page) { |
1258 | /* |
1259 | * If we walked over a stale stable_node, |
1260 | * get_ksm_page() will call rb_erase() and it |
1261 | * may rebalance the tree from under us. So |
1262 | * restart the search from scratch. Returning |
1263 | * NULL would be safe too, but we'd generate |
1264 | * false negative insertions just because some |
1265 | * stable_node was stale. |
1266 | */ |
1267 | goto again; |
1268 | } |
1269 | |
1270 | ret = memcmp_pages(kpage, tree_page); |
1271 | put_page(tree_page); |
1272 | |
1273 | parent = *new; |
1274 | if (ret < 0) |
1275 | new = &parent->rb_left; |
1276 | else if (ret > 0) |
1277 | new = &parent->rb_right; |
1278 | else { |
1279 | /* |
1280 | * It is not a bug that stable_tree_search() didn't |
1281 | * find this node: because at that time our page was |
1282 | * not yet write-protected, so may have changed since. |
1283 | */ |
1284 | return NULL; |
1285 | } |
1286 | } |
1287 | |
1288 | stable_node = alloc_stable_node(); |
1289 | if (!stable_node) |
1290 | return NULL; |
1291 | |
1292 | INIT_HLIST_HEAD(&stable_node->hlist); |
1293 | stable_node->kpfn = kpfn; |
1294 | set_page_stable_node(kpage, stable_node); |
1295 | DO_NUMA(stable_node->nid = nid); |
1296 | rb_link_node(&stable_node->node, parent, new); |
1297 | rb_insert_color(&stable_node->node, root); |
1298 | |
1299 | return stable_node; |
1300 | } |
1301 | |
1302 | /* |
1303 | * unstable_tree_search_insert - search for identical page, |
1304 | * else insert rmap_item into the unstable tree. |
1305 | * |
1306 | * This function searches for a page in the unstable tree identical to the |
1307 | * page currently being scanned; and if no identical page is found in the |
1308 | * tree, we insert rmap_item as a new object into the unstable tree. |
1309 | * |
1310 | * This function returns pointer to rmap_item found to be identical |
1311 | * to the currently scanned page, NULL otherwise. |
1312 | * |
1313 | * This function does both searching and inserting, because they share |
1314 | * the same walking algorithm in an rbtree. |
1315 | */ |
1316 | static |
1317 | struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, |
1318 | struct page *page, |
1319 | struct page **tree_pagep) |
1320 | { |
1321 | struct rb_node **new; |
1322 | struct rb_root *root; |
1323 | struct rb_node *parent = NULL; |
1324 | int nid; |
1325 | |
1326 | nid = get_kpfn_nid(page_to_pfn(page)); |
1327 | root = root_unstable_tree + nid; |
1328 | new = &root->rb_node; |
1329 | |
1330 | while (*new) { |
1331 | struct rmap_item *tree_rmap_item; |
1332 | struct page *tree_page; |
1333 | int ret; |
1334 | |
1335 | cond_resched(); |
1336 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
1337 | tree_page = get_mergeable_page(tree_rmap_item); |
1338 | if (!tree_page) |
1339 | return NULL; |
1340 | |
1341 | /* |
1342 | * Don't substitute a ksm page for a forked page. |
1343 | */ |
1344 | if (page == tree_page) { |
1345 | put_page(tree_page); |
1346 | return NULL; |
1347 | } |
1348 | |
1349 | ret = memcmp_pages(page, tree_page); |
1350 | |
1351 | parent = *new; |
1352 | if (ret < 0) { |
1353 | put_page(tree_page); |
1354 | new = &parent->rb_left; |
1355 | } else if (ret > 0) { |
1356 | put_page(tree_page); |
1357 | new = &parent->rb_right; |
1358 | } else if (!ksm_merge_across_nodes && |
1359 | page_to_nid(tree_page) != nid) { |
1360 | /* |
1361 | * If tree_page has been migrated to another NUMA node, |
1362 | * it will be flushed out and put in the right unstable |
1363 | * tree next time: only merge with it when across_nodes. |
1364 | */ |
1365 | put_page(tree_page); |
1366 | return NULL; |
1367 | } else { |
1368 | *tree_pagep = tree_page; |
1369 | return tree_rmap_item; |
1370 | } |
1371 | } |
1372 | |
1373 | rmap_item->address |= UNSTABLE_FLAG; |
1374 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
1375 | DO_NUMA(rmap_item->nid = nid); |
1376 | rb_link_node(&rmap_item->node, parent, new); |
1377 | rb_insert_color(&rmap_item->node, root); |
1378 | |
1379 | ksm_pages_unshared++; |
1380 | return NULL; |
1381 | } |
1382 | |
1383 | /* |
1384 | * stable_tree_append - add another rmap_item to the linked list of |
1385 | * rmap_items hanging off a given node of the stable tree, all sharing |
1386 | * the same ksm page. |
1387 | */ |
1388 | static void stable_tree_append(struct rmap_item *rmap_item, |
1389 | struct stable_node *stable_node) |
1390 | { |
1391 | rmap_item->head = stable_node; |
1392 | rmap_item->address |= STABLE_FLAG; |
1393 | hlist_add_head(&rmap_item->hlist, &stable_node->hlist); |
1394 | |
1395 | if (rmap_item->hlist.next) |
1396 | ksm_pages_sharing++; |
1397 | else |
1398 | ksm_pages_shared++; |
1399 | } |
1400 | |
1401 | /* |
1402 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
1403 | * if not, compare checksum to previous and if it's the same, see if page can |
1404 | * be inserted into the unstable tree, or merged with a page already there and |
1405 | * both transferred to the stable tree. |
1406 | * |
1407 |  * @page: the page that we are searching for an identical page to. |
1408 | * @rmap_item: the reverse mapping into the virtual address of this page |
1409 | */ |
1410 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) |
1411 | { |
1412 | struct rmap_item *tree_rmap_item; |
1413 | struct page *tree_page = NULL; |
1414 | struct stable_node *stable_node; |
1415 | struct page *kpage; |
1416 | unsigned int checksum; |
1417 | int err; |
1418 | |
1419 | stable_node = page_stable_node(page); |
1420 | if (stable_node) { |
1421 | if (stable_node->head != &migrate_nodes && |
1422 | get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) { |
1423 | rb_erase(&stable_node->node, |
1424 | root_stable_tree + NUMA(stable_node->nid)); |
1425 | stable_node->head = &migrate_nodes; |
1426 | list_add(&stable_node->list, stable_node->head); |
1427 | } |
1428 | if (stable_node->head != &migrate_nodes && |
1429 | rmap_item->head == stable_node) |
1430 | return; |
1431 | } |
1432 | |
1433 | /* We first start with searching the page inside the stable tree */ |
1434 | kpage = stable_tree_search(page); |
1435 | if (kpage == page && rmap_item->head == stable_node) { |
1436 | put_page(kpage); |
1437 | return; |
1438 | } |
1439 | |
1440 | remove_rmap_item_from_tree(rmap_item); |
1441 | |
1442 | if (kpage) { |
1443 | err = try_to_merge_with_ksm_page(rmap_item, page, kpage); |
1444 | if (!err) { |
1445 | /* |
1446 | * The page was successfully merged: |
1447 | * add its rmap_item to the stable tree. |
1448 | */ |
1449 | lock_page(kpage); |
1450 | stable_tree_append(rmap_item, page_stable_node(kpage)); |
1451 | unlock_page(kpage); |
1452 | } |
1453 | put_page(kpage); |
1454 | return; |
1455 | } |
1456 | |
1457 | /* |
1458 | * If the hash value of the page has changed from the last time |
1459 | * we calculated it, this page is changing frequently: therefore we |
1460 | * don't want to insert it in the unstable tree, and we don't want |
1461 | * to waste our time searching for something identical to it there. |
1462 | */ |
1463 | checksum = calc_checksum(page); |
1464 | if (rmap_item->oldchecksum != checksum) { |
1465 | rmap_item->oldchecksum = checksum; |
1466 | return; |
1467 | } |
1468 | |
1469 | tree_rmap_item = |
1470 | unstable_tree_search_insert(rmap_item, page, &tree_page); |
1471 | #ifdef CONFIG_AMLOGIC_CMA |
1472 | /* |
1473 |  * Now the page has been inserted into the unstable tree, but do not |
1474 |  * let a CMA page become the kpage; it can still be merged with other pages |
1475 | */ |
1476 | if (cma_page(page)) { |
1477 | if (tree_rmap_item) |
1478 | put_page(tree_page); |
1479 | return; |
1480 | } |
1481 | #endif /* CONFIG_AMLOGIC_CMA */ |
1482 | if (tree_rmap_item) { |
1483 | bool split; |
1484 | |
1485 | kpage = try_to_merge_two_pages(rmap_item, page, |
1486 | tree_rmap_item, tree_page); |
1487 | /* |
1488 | * If both pages we tried to merge belong to the same compound |
1489 | * page, then we actually ended up increasing the reference |
1490 | * count of the same compound page twice, and split_huge_page |
1491 | * failed. |
1492 | * Here we set a flag if that happened, and we use it later to |
1493 | * try split_huge_page again. Since we call put_page right |
1494 | * afterwards, the reference count will be correct and |
1495 | * split_huge_page should succeed. |
1496 | */ |
1497 | split = PageTransCompound(page) |
1498 | && compound_head(page) == compound_head(tree_page); |
1499 | put_page(tree_page); |
1500 | if (kpage) { |
1501 | /* |
1502 | * The pages were successfully merged: insert new |
1503 | * node in the stable tree and add both rmap_items. |
1504 | */ |
1505 | lock_page(kpage); |
1506 | stable_node = stable_tree_insert(kpage); |
1507 | if (stable_node) { |
1508 | stable_tree_append(tree_rmap_item, stable_node); |
1509 | stable_tree_append(rmap_item, stable_node); |
1510 | } |
1511 | unlock_page(kpage); |
1512 | |
1513 | /* |
1514 | * If we fail to insert the page into the stable tree, |
1515 | * we will have 2 virtual addresses that are pointing |
1516 | * to a ksm page left outside the stable tree, |
1517 | * in which case we need to break_cow on both. |
1518 | */ |
1519 | if (!stable_node) { |
1520 | break_cow(tree_rmap_item); |
1521 | break_cow(rmap_item); |
1522 | } |
1523 | } else if (split) { |
1524 | /* |
1525 | * We are here if we tried to merge two pages and |
1526 | * failed because they both belonged to the same |
1527 | * compound page. We will split the page now, but no |
1528 | * merging will take place. |
1529 | * We do not want to add the cost of a full lock; if |
1530 | * the page is locked, it is better to skip it and |
1531 | * perhaps try again later. |
1532 | */ |
1533 | if (!trylock_page(page)) |
1534 | return; |
1535 | split_huge_page(page); |
1536 | unlock_page(page); |
1537 | } |
1538 | } |
1539 | } |
1540 | |
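/*
 * Find or create the rmap_item tracking addr in this mm_slot's address-
 * ordered rmap_list; items for lower addresses that are passed over on the
 * way are unlinked from the trees and freed.
 */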
1541 | static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, |
1542 | struct rmap_item **rmap_list, |
1543 | unsigned long addr) |
1544 | { |
1545 | struct rmap_item *rmap_item; |
1546 | |
1547 | while (*rmap_list) { |
1548 | rmap_item = *rmap_list; |
1549 | if ((rmap_item->address & PAGE_MASK) == addr) |
1550 | return rmap_item; |
1551 | if (rmap_item->address > addr) |
1552 | break; |
1553 | *rmap_list = rmap_item->rmap_list; |
1554 | remove_rmap_item_from_tree(rmap_item); |
1555 | free_rmap_item(rmap_item); |
1556 | } |
1557 | |
1558 | rmap_item = alloc_rmap_item(); |
1559 | if (rmap_item) { |
1560 | /* It has already been zeroed */ |
1561 | rmap_item->mm = mm_slot->mm; |
1562 | rmap_item->address = addr; |
1563 | rmap_item->rmap_list = *rmap_list; |
1564 | *rmap_list = rmap_item; |
1565 | } |
1566 | return rmap_item; |
1567 | } |
1568 | |
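/*
 * Advance the single ksm_scan cursor to the next anonymous page in a
 * VM_MERGEABLE area, returning its rmap_item and, via *page, the page
 * itself with a reference held; returns NULL when there is currently
 * nothing left to scan.
 */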
1569 | static struct rmap_item *scan_get_next_rmap_item(struct page **page) |
1570 | { |
1571 | struct mm_struct *mm; |
1572 | struct mm_slot *slot; |
1573 | struct vm_area_struct *vma; |
1574 | struct rmap_item *rmap_item; |
1575 | int nid; |
1576 | |
1577 | if (list_empty(&ksm_mm_head.mm_list)) |
1578 | return NULL; |
1579 | |
1580 | slot = ksm_scan.mm_slot; |
1581 | if (slot == &ksm_mm_head) { |
1582 | /* |
1583 | * A number of pages can hang around indefinitely on per-cpu |
1584 | * pagevecs, raised page count preventing write_protect_page |
1585 | * from merging them. Though it doesn't really matter much, |
1586 | * it is puzzling to see some stuck in pages_volatile until |
1587 | * other activity jostles them out, and they also prevented |
1588 | * LTP's KSM test from succeeding deterministically; so drain |
1589 | * them here (here rather than on entry to ksm_do_scan(), |
1590 | * so we don't IPI too often when pages_to_scan is set low). |
1591 | */ |
1592 | lru_add_drain_all(); |
1593 | |
1594 | /* |
1595 | * Whereas stale stable_nodes on the stable_tree itself |
1596 | * get pruned in the regular course of stable_tree_search(), |
1597 | * those moved out to the migrate_nodes list can accumulate: |
1598 | * so prune them once before each full scan. |
1599 | */ |
1600 | if (!ksm_merge_across_nodes) { |
1601 | struct stable_node *stable_node, *next; |
1602 | struct page *page; |
1603 | |
1604 | list_for_each_entry_safe(stable_node, next, |
1605 | &migrate_nodes, list) { |
1606 | page = get_ksm_page(stable_node, false); |
1607 | if (page) |
1608 | put_page(page); |
1609 | cond_resched(); |
1610 | } |
1611 | } |
1612 | |
1613 | for (nid = 0; nid < ksm_nr_node_ids; nid++) |
1614 | root_unstable_tree[nid] = RB_ROOT; |
1615 | |
1616 | spin_lock(&ksm_mmlist_lock); |
1617 | slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); |
1618 | ksm_scan.mm_slot = slot; |
1619 | spin_unlock(&ksm_mmlist_lock); |
1620 | /* |
1621 | * Although we tested list_empty() above, a racing __ksm_exit |
1622 | * of the last mm on the list may have removed it since then. |
1623 | */ |
1624 | if (slot == &ksm_mm_head) |
1625 | return NULL; |
1626 | next_mm: |
1627 | ksm_scan.address = 0; |
1628 | ksm_scan.rmap_list = &slot->rmap_list; |
1629 | } |
1630 | |
1631 | mm = slot->mm; |
1632 | down_read(&mm->mmap_sem); |
1633 | if (ksm_test_exit(mm)) |
1634 | vma = NULL; |
1635 | else |
1636 | vma = find_vma(mm, ksm_scan.address); |
1637 | |
1638 | for (; vma; vma = vma->vm_next) { |
1639 | if (!(vma->vm_flags & VM_MERGEABLE)) |
1640 | continue; |
1641 | if (ksm_scan.address < vma->vm_start) |
1642 | ksm_scan.address = vma->vm_start; |
1643 | if (!vma->anon_vma) |
1644 | ksm_scan.address = vma->vm_end; |
1645 | |
1646 | while (ksm_scan.address < vma->vm_end) { |
1647 | if (ksm_test_exit(mm)) |
1648 | break; |
1649 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
1650 | if (IS_ERR_OR_NULL(*page)) { |
1651 | ksm_scan.address += PAGE_SIZE; |
1652 | cond_resched(); |
1653 | continue; |
1654 | } |
1655 | if (PageAnon(*page)) { |
1656 | flush_anon_page(vma, *page, ksm_scan.address); |
1657 | flush_dcache_page(*page); |
1658 | rmap_item = get_next_rmap_item(slot, |
1659 | ksm_scan.rmap_list, ksm_scan.address); |
1660 | if (rmap_item) { |
1661 | ksm_scan.rmap_list = |
1662 | &rmap_item->rmap_list; |
1663 | ksm_scan.address += PAGE_SIZE; |
1664 | } else |
1665 | put_page(*page); |
1666 | up_read(&mm->mmap_sem); |
1667 | return rmap_item; |
1668 | } |
1669 | put_page(*page); |
1670 | ksm_scan.address += PAGE_SIZE; |
1671 | cond_resched(); |
1672 | } |
1673 | } |
1674 | |
1675 | if (ksm_test_exit(mm)) { |
1676 | ksm_scan.address = 0; |
1677 | ksm_scan.rmap_list = &slot->rmap_list; |
1678 | } |
1679 | /* |
1680 | * Nuke all the rmap_items that lie beyond this current rmap, |
1681 | * because there were no VM_MERGEABLE vmas with such addresses. |
1682 | */ |
1683 | remove_trailing_rmap_items(slot, ksm_scan.rmap_list); |
1684 | |
1685 | spin_lock(&ksm_mmlist_lock); |
1686 | ksm_scan.mm_slot = list_entry(slot->mm_list.next, |
1687 | struct mm_slot, mm_list); |
1688 | if (ksm_scan.address == 0) { |
1689 | /* |
1690 | * We've completed a full scan of all vmas, holding mmap_sem |
1691 | * throughout, and found no VM_MERGEABLE: so do the same as |
1692 | * __ksm_exit does to remove this mm from all our lists now. |
1693 | * This applies either when cleaning up after __ksm_exit |
1694 | * (but beware: we can reach here even before __ksm_exit), |
1695 | * or when all VM_MERGEABLE areas have been unmapped (and |
1696 | * mmap_sem then protects against race with MADV_MERGEABLE). |
1697 | */ |
1698 | hash_del(&slot->link); |
1699 | list_del(&slot->mm_list); |
1700 | spin_unlock(&ksm_mmlist_lock); |
1701 | |
1702 | free_mm_slot(slot); |
1703 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
1704 | up_read(&mm->mmap_sem); |
1705 | mmdrop(mm); |
1706 | } else { |
1707 | up_read(&mm->mmap_sem); |
1708 | /* |
1709 | * up_read(&mm->mmap_sem) must come first: once |
1710 | * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may |
1711 | * already have been freed under us by __ksm_exit(), |
1712 | * because the "mm_slot" is still hashed but |
1713 | * ksm_scan.mm_slot no longer points to it. |
1714 | */ |
1715 | spin_unlock(&ksm_mmlist_lock); |
1716 | } |
1717 | |
1718 | /* Repeat until we've completed scanning the whole list */ |
1719 | slot = ksm_scan.mm_slot; |
1720 | if (slot != &ksm_mm_head) |
1721 | goto next_mm; |
1722 | |
1723 | ksm_scan.seqnr++; |
1724 | return NULL; |
1725 | } |
1726 | |
1727 | /** |
1728 | * ksm_do_scan - the ksm scanner main worker function. |
1729 | * @scan_npages: number of pages we want to scan before we return. |
1730 | */ |
1731 | static void ksm_do_scan(unsigned int scan_npages) |
1732 | { |
1733 | struct rmap_item *rmap_item; |
1734 | struct page *uninitialized_var(page); |
1735 | |
1736 | while (scan_npages-- && likely(!freezing(current))) { |
1737 | cond_resched(); |
1738 | rmap_item = scan_get_next_rmap_item(&page); |
1739 | if (!rmap_item) |
1740 | return; |
1741 | cmp_and_merge_page(page, rmap_item); |
1742 | put_page(page); |
1743 | } |
1744 | } |
1745 | |
1746 | static int ksmd_should_run(void) |
1747 | { |
1748 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); |
1749 | } |
1750 | |
1751 | static int ksm_scan_thread(void *nothing) |
1752 | { |
1753 | set_freezable(); |
1754 | set_user_nice(current, 5); |
1755 | |
1756 | while (!kthread_should_stop()) { |
1757 | mutex_lock(&ksm_thread_mutex); |
1758 | wait_while_offlining(); |
1759 | if (ksmd_should_run()) |
1760 | ksm_do_scan(ksm_thread_pages_to_scan); |
1761 | mutex_unlock(&ksm_thread_mutex); |
1762 | |
1763 | try_to_freeze(); |
1764 | |
1765 | if (ksmd_should_run()) { |
1766 | schedule_timeout_interruptible( |
1767 | msecs_to_jiffies(ksm_thread_sleep_millisecs)); |
1768 | } else { |
1769 | wait_event_freezable(ksm_thread_wait, |
1770 | ksmd_should_run() || kthread_should_stop()); |
1771 | } |
1772 | } |
1773 | return 0; |
1774 | } |
1775 | |
1776 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
1777 | unsigned long end, int advice, unsigned long *vm_flags) |
1778 | { |
1779 | struct mm_struct *mm = vma->vm_mm; |
1780 | int err; |
1781 | |
1782 | switch (advice) { |
1783 | case MADV_MERGEABLE: |
1784 | /* |
1785 | * Be somewhat over-protective for now! |
1786 | */ |
1787 | if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | |
1788 | VM_PFNMAP | VM_IO | VM_DONTEXPAND | |
1789 | VM_HUGETLB | VM_MIXEDMAP)) |
1790 | return 0; /* just ignore the advice */ |
1791 | |
1792 | #ifdef VM_SAO |
1793 | if (*vm_flags & VM_SAO) |
1794 | return 0; |
1795 | #endif |
1796 | |
1797 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
1798 | err = __ksm_enter(mm); |
1799 | if (err) |
1800 | return err; |
1801 | } |
1802 | |
1803 | *vm_flags |= VM_MERGEABLE; |
1804 | break; |
1805 | |
1806 | case MADV_UNMERGEABLE: |
1807 | if (!(*vm_flags & VM_MERGEABLE)) |
1808 | return 0; /* just ignore the advice */ |
1809 | |
1810 | if (vma->anon_vma) { |
1811 | err = unmerge_ksm_pages(vma, start, end); |
1812 | if (err) |
1813 | return err; |
1814 | } |
1815 | |
1816 | *vm_flags &= ~VM_MERGEABLE; |
1817 | break; |
1818 | } |
1819 | |
1820 | return 0; |
1821 | } |
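
/*
 * Userspace reaches ksm_madvise() via madvise(2).  A minimal sketch of the
 * calling side (illustrative only, not part of this file): an application
 * opts an anonymous mapping in and out of KSM scanning like so:
 *
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(ptr, len, MADV_MERGEABLE);	advise merging
 *	...
 *	madvise(ptr, len, MADV_UNMERGEABLE);	break COW, stop merging
 *
 * Note that the MADV_MERGEABLE case above silently ignores VM_SHARED,
 * VM_PFNMAP and similar vmas rather than returning an error.
 */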
1822 | |
1823 | int __ksm_enter(struct mm_struct *mm) |
1824 | { |
1825 | struct mm_slot *mm_slot; |
1826 | int needs_wakeup; |
1827 | |
1828 | mm_slot = alloc_mm_slot(); |
1829 | if (!mm_slot) |
1830 | return -ENOMEM; |
1831 | |
1832 | /* Check ksm_run too? Would need tighter locking */ |
1833 | needs_wakeup = list_empty(&ksm_mm_head.mm_list); |
1834 | |
1835 | spin_lock(&ksm_mmlist_lock); |
1836 | insert_to_mm_slots_hash(mm, mm_slot); |
1837 | /* |
1838 | * When KSM_RUN_MERGE (or KSM_RUN_STOP), |
1839 | * insert just behind the scanning cursor, to let the area settle |
1840 | * down a little; when fork is followed by immediate exec, we don't |
1841 | * want ksmd to waste time setting up and tearing down an rmap_list. |
1842 | * |
1843 | * But when KSM_RUN_UNMERGE, it's important to insert ahead of its |
1844 | * scanning cursor, otherwise KSM pages in newly forked mms will be |
1845 | * missed: then we might as well insert at the end of the list. |
1846 | */ |
1847 | if (ksm_run & KSM_RUN_UNMERGE) |
1848 | list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); |
1849 | else |
1850 | list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); |
1851 | spin_unlock(&ksm_mmlist_lock); |
1852 | |
1853 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
1854 | atomic_inc(&mm->mm_count); |
1855 | |
1856 | if (needs_wakeup) |
1857 | wake_up_interruptible(&ksm_thread_wait); |
1858 | |
1859 | return 0; |
1860 | } |
1861 | |
1862 | void __ksm_exit(struct mm_struct *mm) |
1863 | { |
1864 | struct mm_slot *mm_slot; |
1865 | int easy_to_free = 0; |
1866 | |
1867 | /* |
1868 | * This process is exiting: if it's straightforward (as is the |
1869 | * case when ksmd was never running), free mm_slot immediately. |
1870 | * But if it's at the cursor or has rmap_items linked to it, use |
1871 | * mmap_sem to synchronize with any break_cows before pagetables |
1872 | * are freed, and leave the mm_slot on the list for ksmd to free. |
1873 | * Beware: ksm may already have noticed it exiting and freed the slot. |
1874 | */ |
1875 | |
1876 | spin_lock(&ksm_mmlist_lock); |
1877 | mm_slot = get_mm_slot(mm); |
1878 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
1879 | if (!mm_slot->rmap_list) { |
1880 | hash_del(&mm_slot->link); |
1881 | list_del(&mm_slot->mm_list); |
1882 | easy_to_free = 1; |
1883 | } else { |
1884 | list_move(&mm_slot->mm_list, |
1885 | &ksm_scan.mm_slot->mm_list); |
1886 | } |
1887 | } |
1888 | spin_unlock(&ksm_mmlist_lock); |
1889 | |
1890 | if (easy_to_free) { |
1891 | free_mm_slot(mm_slot); |
1892 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
1893 | mmdrop(mm); |
1894 | } else if (mm_slot) { |
1895 | down_write(&mm->mmap_sem); |
1896 | up_write(&mm->mmap_sem); |
1897 | } |
1898 | } |
1899 | |
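/*
 * ksm_might_need_to_copy() serves the swapin paths.  Roughly: a page
 * brought back from swap may still be, or may once have been, a KSM page
 * whose anon_vma no longer fits this vma; in that case it cannot simply
 * be mapped back here, so allocate and return a fresh private copy for
 * the caller to map instead.
 */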
1900 | struct page *ksm_might_need_to_copy(struct page *page, |
1901 | struct vm_area_struct *vma, unsigned long address) |
1902 | { |
1903 | struct anon_vma *anon_vma = page_anon_vma(page); |
1904 | struct page *new_page; |
1905 | |
1906 | if (PageKsm(page)) { |
1907 | if (page_stable_node(page) && |
1908 | !(ksm_run & KSM_RUN_UNMERGE)) |
1909 | return page; /* no need to copy it */ |
1910 | } else if (!anon_vma) { |
1911 | return page; /* no need to copy it */ |
1912 | } else if (anon_vma->root == vma->anon_vma->root && |
1913 | page->index == linear_page_index(vma, address)) { |
1914 | return page; /* still no need to copy it */ |
1915 | } |
1916 | if (!PageUptodate(page)) |
1917 | return page; /* let do_swap_page report the error */ |
1918 | |
1919 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
1920 | if (new_page) { |
1921 | copy_user_highpage(new_page, page, address, vma); |
1922 | |
1923 | SetPageDirty(new_page); |
1924 | __SetPageUptodate(new_page); |
1925 | __SetPageLocked(new_page); |
1926 | } |
1927 | |
1928 | return new_page; |
1929 | } |
1930 | |
1931 | int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) |
1932 | { |
1933 | struct stable_node *stable_node; |
1934 | struct rmap_item *rmap_item; |
1935 | int ret = SWAP_AGAIN; |
1936 | int search_new_forks = 0; |
1937 | |
1938 | VM_BUG_ON_PAGE(!PageKsm(page), page); |
1939 | |
1940 | /* |
1941 | * Rely on the page lock to protect against concurrent modifications |
1942 | * to that page's node of the stable tree. |
1943 | */ |
1944 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
1945 | |
1946 | stable_node = page_stable_node(page); |
1947 | if (!stable_node) |
1948 | return ret; |
1949 | again: |
1950 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
1951 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
1952 | struct anon_vma_chain *vmac; |
1953 | struct vm_area_struct *vma; |
1954 | |
1955 | cond_resched(); |
1956 | anon_vma_lock_read(anon_vma); |
1957 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
1958 | 0, ULONG_MAX) { |
1959 | cond_resched(); |
1960 | vma = vmac->vma; |
1961 | if (rmap_item->address < vma->vm_start || |
1962 | rmap_item->address >= vma->vm_end) |
1963 | continue; |
1964 | /* |
1965 | * Initially we examine only the vma which covers this |
1966 | * rmap_item; but later, if there is still work to do, |
1967 | * we examine covering vmas in other mms: in case they |
1968 | * were forked from the original since ksmd passed. |
1969 | */ |
1970 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) |
1971 | continue; |
1972 | |
1973 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
1974 | continue; |
1975 | |
1976 | ret = rwc->rmap_one(page, vma, |
1977 | rmap_item->address, rwc->arg); |
1978 | if (ret != SWAP_AGAIN) { |
1979 | anon_vma_unlock_read(anon_vma); |
1980 | goto out; |
1981 | } |
1982 | if (rwc->done && rwc->done(page)) { |
1983 | anon_vma_unlock_read(anon_vma); |
1984 | goto out; |
1985 | } |
1986 | } |
1987 | anon_vma_unlock_read(anon_vma); |
1988 | } |
1989 | if (!search_new_forks++) |
1990 | goto again; |
1991 | out: |
1992 | return ret; |
1993 | } |
1994 | |
1995 | #ifdef CONFIG_MIGRATION |
1996 | void ksm_migrate_page(struct page *newpage, struct page *oldpage) |
1997 | { |
1998 | struct stable_node *stable_node; |
1999 | |
2000 | VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); |
2001 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); |
2002 | VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); |
2003 | |
2004 | stable_node = page_stable_node(newpage); |
2005 | if (stable_node) { |
2006 | VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); |
2007 | stable_node->kpfn = page_to_pfn(newpage); |
2008 | /* |
2009 | * newpage->mapping was set in advance; now we need smp_wmb() |
2010 | * to make sure that the new stable_node->kpfn is visible |
2011 | * to get_ksm_page() before it can see that oldpage->mapping |
2012 | * has gone stale (or that PageSwapCache has been cleared). |
2013 | */ |
2014 | smp_wmb(); |
2015 | set_page_stable_node(oldpage, NULL); |
2016 | } |
2017 | } |
2018 | #endif /* CONFIG_MIGRATION */ |
2019 | |
2020 | #ifdef CONFIG_MEMORY_HOTREMOVE |
2021 | static void wait_while_offlining(void) |
2022 | { |
2023 | while (ksm_run & KSM_RUN_OFFLINE) { |
2024 | mutex_unlock(&ksm_thread_mutex); |
2025 | wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), |
2026 | TASK_UNINTERRUPTIBLE); |
2027 | mutex_lock(&ksm_thread_mutex); |
2028 | } |
2029 | } |
2030 | |
2031 | static void ksm_check_stable_tree(unsigned long start_pfn, |
2032 | unsigned long end_pfn) |
2033 | { |
2034 | struct stable_node *stable_node, *next; |
2035 | struct rb_node *node; |
2036 | int nid; |
2037 | |
2038 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
2039 | node = rb_first(root_stable_tree + nid); |
2040 | while (node) { |
2041 | stable_node = rb_entry(node, struct stable_node, node); |
2042 | if (stable_node->kpfn >= start_pfn && |
2043 | stable_node->kpfn < end_pfn) { |
2044 | /* |
2045 | * Don't get_ksm_page: the page has already gone, |
2046 | * which is why we keep kpfn instead of page* |
2047 | */ |
2048 | remove_node_from_stable_tree(stable_node); |
2049 | node = rb_first(root_stable_tree + nid); |
2050 | } else |
2051 | node = rb_next(node); |
2052 | cond_resched(); |
2053 | } |
2054 | } |
2055 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
2056 | if (stable_node->kpfn >= start_pfn && |
2057 | stable_node->kpfn < end_pfn) |
2058 | remove_node_from_stable_tree(stable_node); |
2059 | cond_resched(); |
2060 | } |
2061 | } |
2062 | |
2063 | static int ksm_memory_callback(struct notifier_block *self, |
2064 | unsigned long action, void *arg) |
2065 | { |
2066 | struct memory_notify *mn = arg; |
2067 | |
2068 | switch (action) { |
2069 | case MEM_GOING_OFFLINE: |
2070 | /* |
2071 | * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() |
2072 | * and remove_all_stable_nodes() while memory is going offline: |
2073 | * it is unsafe for them to touch the stable tree at this time. |
2074 | * But unmerge_ksm_pages(), rmap lookups and other entry points |
2075 | * which do not need the ksm_thread_mutex are all safe. |
2076 | */ |
2077 | mutex_lock(&ksm_thread_mutex); |
2078 | ksm_run |= KSM_RUN_OFFLINE; |
2079 | mutex_unlock(&ksm_thread_mutex); |
2080 | break; |
2081 | |
2082 | case MEM_OFFLINE: |
2083 | /* |
2084 | * Most of the work is done by page migration; but there might |
2085 | * be a few stable_nodes left over, still pointing to struct |
2086 | * pages which have been offlined: prune those from the tree, |
2087 | * otherwise get_ksm_page() might later try to access a |
2088 | * non-existent struct page. |
2089 | */ |
2090 | ksm_check_stable_tree(mn->start_pfn, |
2091 | mn->start_pfn + mn->nr_pages); |
2092 | /* fallthrough */ |
2093 | |
2094 | case MEM_CANCEL_OFFLINE: |
2095 | mutex_lock(&ksm_thread_mutex); |
2096 | ksm_run &= ~KSM_RUN_OFFLINE; |
2097 | mutex_unlock(&ksm_thread_mutex); |
2098 | |
2099 | smp_mb(); /* wake_up_bit advises this */ |
2100 | wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); |
2101 | break; |
2102 | } |
2103 | return NOTIFY_OK; |
2104 | } |
2105 | #else |
2106 | static void wait_while_offlining(void) |
2107 | { |
2108 | } |
2109 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
2110 | |
2111 | #ifdef CONFIG_SYSFS |
2112 | /* |
2113 | * This all compiles without CONFIG_SYSFS, but is a waste of space. |
2114 | */ |
2115 | |
2116 | #define KSM_ATTR_RO(_name) \ |
2117 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
2118 | #define KSM_ATTR(_name) \ |
2119 | static struct kobj_attribute _name##_attr = \ |
2120 | __ATTR(_name, 0644, _name##_show, _name##_store) |
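
/*
 * For example, KSM_ATTR(run) further below expands to
 *
 *	static struct kobj_attribute run_attr =
 *		__ATTR(run, 0644, run_show, run_store);
 *
 * pairing each sysfs file with its _show()/_store() handlers.
 */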
2121 | |
2122 | static ssize_t sleep_millisecs_show(struct kobject *kobj, |
2123 | struct kobj_attribute *attr, char *buf) |
2124 | { |
2125 | return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); |
2126 | } |
2127 | |
2128 | static ssize_t sleep_millisecs_store(struct kobject *kobj, |
2129 | struct kobj_attribute *attr, |
2130 | const char *buf, size_t count) |
2131 | { |
2132 | unsigned long msecs; |
2133 | int err; |
2134 | |
2135 | err = kstrtoul(buf, 10, &msecs); |
2136 | if (err || msecs > UINT_MAX) |
2137 | return -EINVAL; |
2138 | |
2139 | ksm_thread_sleep_millisecs = msecs; |
2140 | |
2141 | return count; |
2142 | } |
2143 | KSM_ATTR(sleep_millisecs); |
2144 | |
2145 | static ssize_t pages_to_scan_show(struct kobject *kobj, |
2146 | struct kobj_attribute *attr, char *buf) |
2147 | { |
2148 | return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); |
2149 | } |
2150 | |
2151 | static ssize_t pages_to_scan_store(struct kobject *kobj, |
2152 | struct kobj_attribute *attr, |
2153 | const char *buf, size_t count) |
2154 | { |
2155 | int err; |
2156 | unsigned long nr_pages; |
2157 | |
2158 | err = kstrtoul(buf, 10, &nr_pages); |
2159 | if (err || nr_pages > UINT_MAX) |
2160 | return -EINVAL; |
2161 | |
2162 | ksm_thread_pages_to_scan = nr_pages; |
2163 | |
2164 | return count; |
2165 | } |
2166 | KSM_ATTR(pages_to_scan); |
2167 | |
2168 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, |
2169 | char *buf) |
2170 | { |
2171 | return sprintf(buf, "%lu\n", ksm_run); |
2172 | } |
2173 | |
2174 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, |
2175 | const char *buf, size_t count) |
2176 | { |
2177 | int err; |
2178 | unsigned long flags; |
2179 | |
2180 | err = kstrtoul(buf, 10, &flags); |
2181 | if (err || flags > UINT_MAX) |
2182 | return -EINVAL; |
2183 | if (flags > KSM_RUN_UNMERGE) |
2184 | return -EINVAL; |
2185 | |
2186 | /* |
2187 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. |
2188 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, |
2189 | * breaking COW to free the pages_shared (but leaves mm_slots |
2190 | * on the list for when ksmd may be set running again). |
2191 | */ |
2192 | |
2193 | mutex_lock(&ksm_thread_mutex); |
2194 | wait_while_offlining(); |
2195 | if (ksm_run != flags) { |
2196 | ksm_run = flags; |
2197 | if (flags & KSM_RUN_UNMERGE) { |
2198 | set_current_oom_origin(); |
2199 | err = unmerge_and_remove_all_rmap_items(); |
2200 | clear_current_oom_origin(); |
2201 | if (err) { |
2202 | ksm_run = KSM_RUN_STOP; |
2203 | count = err; |
2204 | } |
2205 | } |
2206 | } |
2207 | mutex_unlock(&ksm_thread_mutex); |
2208 | |
2209 | if (flags & KSM_RUN_MERGE) |
2210 | wake_up_interruptible(&ksm_thread_wait); |
2211 | |
2212 | return count; |
2213 | } |
2214 | KSM_ATTR(run); |
2215 | |
2216 | #ifdef CONFIG_NUMA |
2217 | static ssize_t merge_across_nodes_show(struct kobject *kobj, |
2218 | struct kobj_attribute *attr, char *buf) |
2219 | { |
2220 | return sprintf(buf, "%u\n", ksm_merge_across_nodes); |
2221 | } |
2222 | |
2223 | static ssize_t merge_across_nodes_store(struct kobject *kobj, |
2224 | struct kobj_attribute *attr, |
2225 | const char *buf, size_t count) |
2226 | { |
2227 | int err; |
2228 | unsigned long knob; |
2229 | |
2230 | err = kstrtoul(buf, 10, &knob); |
2231 | if (err) |
2232 | return err; |
2233 | if (knob > 1) |
2234 | return -EINVAL; |
2235 | |
2236 | mutex_lock(&ksm_thread_mutex); |
2237 | wait_while_offlining(); |
2238 | if (ksm_merge_across_nodes != knob) { |
2239 | if (ksm_pages_shared || remove_all_stable_nodes()) |
2240 | err = -EBUSY; |
2241 | else if (root_stable_tree == one_stable_tree) { |
2242 | struct rb_root *buf; |
2243 | /* |
2244 | * This is the first time that we switch away from the |
2245 | * default of merging across nodes: must now allocate |
2246 | * a buffer to hold as many roots as may be needed. |
2247 | * Allocate stable and unstable together: |
2248 | * MAXSMP NODES_SHIFT 10 will use 16kB. |
2249 | */ |
2250 | buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), |
2251 | GFP_KERNEL); |
2252 | /* RB_ROOT is { NULL }, i.e. all zeroes, just as kcalloc left the buffer */ |
2253 | if (!buf) |
2254 | err = -ENOMEM; |
2255 | else { |
2256 | root_stable_tree = buf; |
2257 | root_unstable_tree = buf + nr_node_ids; |
2258 | /* Stable tree is empty but not the unstable */ |
2259 | root_unstable_tree[0] = one_unstable_tree[0]; |
2260 | } |
2261 | } |
2262 | if (!err) { |
2263 | ksm_merge_across_nodes = knob; |
2264 | ksm_nr_node_ids = knob ? 1 : nr_node_ids; |
2265 | } |
2266 | } |
2267 | mutex_unlock(&ksm_thread_mutex); |
2268 | |
2269 | return err ? err : count; |
2270 | } |
2271 | KSM_ATTR(merge_across_nodes); |
2272 | #endif |
2273 | |
2274 | static ssize_t pages_shared_show(struct kobject *kobj, |
2275 | struct kobj_attribute *attr, char *buf) |
2276 | { |
2277 | return sprintf(buf, "%lu\n", ksm_pages_shared); |
2278 | } |
2279 | KSM_ATTR_RO(pages_shared); |
2280 | |
2281 | static ssize_t pages_sharing_show(struct kobject *kobj, |
2282 | struct kobj_attribute *attr, char *buf) |
2283 | { |
2284 | return sprintf(buf, "%lu\n", ksm_pages_sharing); |
2285 | } |
2286 | KSM_ATTR_RO(pages_sharing); |
2287 | |
2288 | static ssize_t pages_unshared_show(struct kobject *kobj, |
2289 | struct kobj_attribute *attr, char *buf) |
2290 | { |
2291 | return sprintf(buf, "%lu\n", ksm_pages_unshared); |
2292 | } |
2293 | KSM_ATTR_RO(pages_unshared); |
2294 | |
2295 | static ssize_t pages_volatile_show(struct kobject *kobj, |
2296 | struct kobj_attribute *attr, char *buf) |
2297 | { |
2298 | long ksm_pages_volatile; |
2299 | |
2300 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared |
2301 | - ksm_pages_sharing - ksm_pages_unshared; |
2302 | /* |
2303 | * It was not worth any locking to calculate that statistic, |
2304 | * but it might therefore sometimes be negative: conceal that. |
2305 | */ |
2306 | if (ksm_pages_volatile < 0) |
2307 | ksm_pages_volatile = 0; |
2308 | return sprintf(buf, "%ld\n", ksm_pages_volatile); |
2309 | } |
2310 | KSM_ATTR_RO(pages_volatile); |
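
/*
 * How the counters above fit together: every rmap_item that ksmd is
 * tracking falls into exactly one of these buckets, so, ignoring the
 * unlocked reads,
 *
 *	ksm_rmap_items == pages_shared + pages_sharing
 *			+ pages_unshared + pages_volatile
 *
 * which is why pages_volatile_show() computes its value by subtraction
 * and clamps it at zero.
 */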
2311 | |
2312 | static ssize_t full_scans_show(struct kobject *kobj, |
2313 | struct kobj_attribute *attr, char *buf) |
2314 | { |
2315 | return sprintf(buf, "%lu\n", ksm_scan.seqnr); |
2316 | } |
2317 | KSM_ATTR_RO(full_scans); |
2318 | |
2319 | static struct attribute *ksm_attrs[] = { |
2320 | &sleep_millisecs_attr.attr, |
2321 | &pages_to_scan_attr.attr, |
2322 | &run_attr.attr, |
2323 | &pages_shared_attr.attr, |
2324 | &pages_sharing_attr.attr, |
2325 | &pages_unshared_attr.attr, |
2326 | &pages_volatile_attr.attr, |
2327 | &full_scans_attr.attr, |
2328 | #ifdef CONFIG_NUMA |
2329 | &merge_across_nodes_attr.attr, |
2330 | #endif |
2331 | NULL, |
2332 | }; |
2333 | |
2334 | static struct attribute_group ksm_attr_group = { |
2335 | .attrs = ksm_attrs, |
2336 | .name = "ksm", |
2337 | }; |
2338 | #endif /* CONFIG_SYSFS */ |
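
/*
 * With CONFIG_SYSFS, the attribute group above appears under
 * /sys/kernel/mm/ksm/ (ksm_attr_group is registered on mm_kobj by
 * ksm_init() below).  A minimal shell sketch of driving ksmd, with
 * illustrative values:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1   > /sys/kernel/mm/ksm/run	KSM_RUN_MERGE: start scanning
 *	cat /sys/kernel/mm/ksm/pages_sharing	rough measure of savings
 *	echo 2   > /sys/kernel/mm/ksm/run	KSM_RUN_UNMERGE: undo merging
 *	echo 0   > /sys/kernel/mm/ksm/run	KSM_RUN_STOP: stop scanning
 */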
2339 | |
2340 | static int __init ksm_init(void) |
2341 | { |
2342 | struct task_struct *ksm_thread; |
2343 | int err; |
2344 | |
2345 | err = ksm_slab_init(); |
2346 | if (err) |
2347 | goto out; |
2348 | |
2349 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
2350 | if (IS_ERR(ksm_thread)) { |
2351 | pr_err("ksm: creating kthread failed\n"); |
2352 | err = PTR_ERR(ksm_thread); |
2353 | goto out_free; |
2354 | } |
2355 | |
2356 | #ifdef CONFIG_SYSFS |
2357 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
2358 | if (err) { |
2359 | pr_err("ksm: register sysfs failed\n"); |
2360 | kthread_stop(ksm_thread); |
2361 | goto out_free; |
2362 | } |
2363 | #else |
2364 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ |
2365 | |
2366 | #endif /* CONFIG_SYSFS */ |
2367 | |
2368 | #ifdef CONFIG_MEMORY_HOTREMOVE |
2369 | /* There is no significance to this priority 100 */ |
2370 | hotplug_memory_notifier(ksm_memory_callback, 100); |
2371 | #endif |
2372 | return 0; |
2373 | |
2374 | out_free: |
2375 | ksm_slab_free(); |
2376 | out: |
2377 | return err; |
2378 | } |
2379 | subsys_initcall(ksm_init); |
2380 |