/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *	NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *	NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *	(R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
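 *
 * As a worked example with made-up numbers: with NR_inactive = 100
 * and NR_active = 50, a page evicted at E = 1000 that refaults at
 * R = 1030 has a refault distance of 30, and thus a minimum access
 * distance of 100 + 30 = 130 slots. Since 30 <= 50, the page could
 * have stayed resident had it displaced 30 of the colder active
 * pages, so it is a candidate for activation on refault.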
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained per lruvec (lruvec->inactive_age),
 * i.e. per node and per memory cgroup.
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node and the memcg) is stored in the now empty page
 * cache radix tree slot of the evicted page. This is called a shadow
 * entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
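
/*
 * Illustrative layout of a packed shadow entry, from most to least
 * significant bits (field widths depend on the NODES_SHIFT and
 * MEM_CGROUP_ID_SHIFT of the running configuration):
 *
 *	+--------------------------+----------+---------+----+-------+
 *	| eviction >> bucket_order | memcg id | node id | WS | radix |
 *	+--------------------------+----------+---------+----+-------+
 *
 * WS is the PageWorkingset bit, and "radix" holds the radix tree
 * exceptional entry bits that mark the slot as a non-page entry.
 */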

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = (unsigned long)shadow;
	int memcgid, nid;
	bool workingset;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
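
/*
 * Hypothetical debug sketch, not wired up to any caller: checks that
 * pack_shadow() and unpack_shadow() round-trip, modulo the low
 * timestamp bits shaved off by bucket_order, assuming the eviction
 * counter still fits in the entry's timestamp bits.
 */
static void __maybe_unused shadow_roundtrip_check(int memcgid,
						  pg_data_t *pgdat,
						  unsigned long eviction,
						  bool workingset)
{
	void *shadow = pack_shadow(memcgid, pgdat, eviction, workingset);
	unsigned long ev2;
	pg_data_t *pgdat2;
	bool ws2;
	int memcgid2;

	unpack_shadow(shadow, &memcgid2, &pgdat2, &ev2, &ws2);

	WARN_ON(memcgid2 != memcgid);
	WARN_ON(pgdat2 != pgdat);
	WARN_ON(ws2 != workingset);
	/* bucketing drops the bucket_order low bits of the timestamp */
	WARN_ON(ev2 != (eviction & ~((1UL << bucket_order) - 1)));
}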

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);
	struct mem_cgroup *memcg = page_memcg(page);
	int memcgid = mem_cgroup_id(memcg);
	unsigned long eviction;
	struct lruvec *lruvec;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	eviction = atomic_long_inc_return(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
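
/*
 * Sketch of the intended use in the reclaim path (cf. the page
 * freeing code in mm/vmscan.c; illustrative, not copied from there):
 * when a clean file page is dropped, the returned shadow entry is
 * stored in the page's old radix tree slot:
 *
 *	shadow = workingset_eviction(mapping, page);
 *	__delete_from_page_cache(page, shadow);
 */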

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 */
void workingset_refault(struct page *page, void *shadow)
{
	unsigned long refault_distance;
	struct pglist_data *pgdat;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	refault = atomic_long_read(&lruvec->inactive_age);
	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * inactive_age to lap a shadow entry in the field, which can
	 * then result in a false small refault distance, leading to a
	 * false activation should this old entry actually refault
	 * again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	inc_node_state(pgdat, WORKINGSET_REFAULT);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't act on pages that couldn't stay resident even if all
	 * the memory was available to the page cache.
	 */
	if (refault_distance > active_file)
		goto out;

	SetPageActive(page);
	atomic_long_inc(&lruvec->inactive_age);
	inc_node_state(pgdat, WORKINGSET_ACTIVATE);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		inc_node_state(pgdat, WORKINGSET_RESTORE);
	}
out:
	rcu_read_unlock();
}
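
/*
 * Sketch of a caller in the page cache insertion path (cf.
 * mm/filemap.c; illustrative only): after a page is added to the
 * tree and before it goes on the LRU, any shadow entry found in its
 * slot is handed to workingset_refault():
 *
 *	if (shadow)
 *		workingset_refault(page, shadow);
 *	lru_cache_add(page);
 */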

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	atomic_long_inc(&lruvec->inactive_age);
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long shadow_nodes;
	unsigned long max_nodes;
	unsigned long pages;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
	local_irq_enable();

	if (sc->memcg) {
		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL_FILE);
	} else {
		pages = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
	}

	/*
	 * Active cache pages are limited to 50% of memory, and shadow
	 * entries that represent a refault distance bigger than that
	 * do not have any effect. Limit the number of shadow nodes
	 * such that shadow entries do not exceed the number of active
	 * cache pages, assuming a worst-case node population density
	 * of 1/8th on average.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~2% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
	 */
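	/*
	 * Worked example with the common RADIX_TREE_MAP_SHIFT == 6:
	 * the shift below is 1 + 6 - 3 == 4, i.e. one shadow node is
	 * allowed per 16 pages of file cache. At 8 entries per node
	 * (1/8th of 64 slots), that caps shadow entries at half the
	 * file pages - the maximum size of the active list.
	 */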
	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);

	if (shadow_nodes <= max_nodes)
		return 0;

	return shadow_nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	BUG_ON(!workingset_node_shadows(node));
	BUG_ON(workingset_node_pages(node));

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			workingset_node_shadows_dec(node);
			BUG_ON(!mapping->nrexceptional);
			mapping->nrexceptional--;
		}
	}
	BUG_ON(workingset_node_shadows(node));
	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
				   shadow_lru_isolate, NULL);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);
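	/*
	 * Worked example, assuming common configurations: on 64-bit
	 * with NODES_SHIFT == 10 and MEM_CGROUP_ID_SHIFT == 16,
	 * EVICTION_SHIFT is 2 + 1 + 10 + 16 == 29, leaving 35
	 * timestamp bits - ample, so bucket_order stays 0. On 32-bit
	 * with NODES_SHIFT == 2, only 32 - 21 == 11 bits remain; with
	 * 1G of 4k pages (max_order == 18), bucket_order becomes 7.
	 */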

	ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&workingset_shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);