#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page. This requires rebuilding the kernel, which is a really
 * time-consuming process. Sometimes a rebuild is impossible due to
 * third-party module dependencies. Finally, enlarging struct page could
 * cause unwanted changes in system behaviour.
 *
 * This feature is intended to overcome the above problems. It allocates
 * memory for extended data per page in a certain place rather than in
 * struct page itself. This memory can be accessed through the accessor
 * functions provided by this code. During the boot process, it checks
 * whether allocation of a huge chunk of memory is needed or not. If not,
 * it avoids allocating memory at all. With this advantage, we can include
 * this feature in the kernel by default, avoiding rebuilds and the
 * problems mentioned above.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if a user wants to avoid useless
 * memory allocation at boot time. The other is the optional init
 * callback, which is used to do proper initialization after memory is
 * allocated.
 *
 * The need callback is used to decide whether extended memory allocation
 * is needed or not. Sometimes users want to deactivate some features for
 * a given boot, and the extra memory would be unnecessary. In this case,
 * to avoid allocating a huge chunk of memory, each client expresses its
 * need for extra memory through the need callback. If one of the need
 * callbacks returns true, it means that someone needs extra memory, so
 * the page extension core should allocate memory for page extension. If
 * none of the need callbacks returns true, memory isn't needed at all for
 * this boot and the page extension core can skip the allocation. As a
 * result, no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether there is a
 * request for extra memory through the size field in struct
 * page_ext_operations. If it is non-zero, extra space is allocated for
 * each page_ext entry and the offset of that space is returned to the
 * user through the offset field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, the
 * extra memory is allocated some time later than the memmap, so the
 * lifetime of the memory for page extension isn't the same as that of
 * the memmap for struct page. Therefore, clients can't store extra data
 * until page extension is initialized, even though pages are already
 * allocated and freely used. This could leave the per-page extra data in
 * an inadequate state, so, to prevent that, a client can use this
 * callback to initialize that state correctly.
 */
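
/*
 * For illustration only (not part of this file): a hypothetical client
 * "foo" that wants one extra byte per page, and is only active when its
 * foo_enabled knob is set, could register itself as sketched below and
 * be listed in page_ext_ops[]. All of the "foo" names are made up.
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(u8),	// extra bytes per entry
 *		.need = need_foo,	// consulted once at boot
 *	};
 *
 * After boot, the extra byte would live at
 * (void *)lookup_page_ext(page) + foo_ops.offset.
 */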

static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_POISONING
	&page_poisoning_ops,
#endif
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

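/*
 * Ask each registered client whether it needs page_ext for this boot.
 * For every client that does, record where its extra data will live
 * inside an entry (operations->offset) and grow the per-entry footprint
 * (extra_mem) by the size it requested.
 */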
static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

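/*
 * Entries are laid out as a flat array: each one is a struct page_ext
 * followed by the extra space the clients requested, so this is the
 * stride between consecutive entries.
 */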
static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, the range it touches can fall outside
	 * the exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return get_entry(section->page_ext, pfn);
}

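/*
 * Prefer node-local, physically contiguous memory for the entry table;
 * fall back to vmalloc space when the page allocator can't satisfy the
 * request (hence __GFP_NOWARN on the first attempt).
 */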
static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn * entry_size)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION, so mask it down
	 * to the section boundary. Storing the table biased by the
	 * section's first pfn lets get_entry(section->page_ext, pfn) index
	 * directly with an absolute pfn.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
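/*
 * Undo alloc_page_ext(): the table came either from the page allocator
 * or from vmalloc, so free it by the matching method.
 */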
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid
		 * memory. "start_pfn" passed to us is a pfn which is an
		 * argument of online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

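/*
 * Memory hotplug notifier: grow the page_ext tables before a memory
 * block comes online and tear them down once it is offline or when
 * onlining is cancelled.
 */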
static int __meminit page_ext_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
					mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and
		 * the page->flags of out-of-node pages are not initialized.
		 * So we scan [start_pfn, the biggest section's pfn <
		 * end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap. We know some
			 * architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 |...
			 *
			 * Take DEFERRED_STRUCT_PAGE_INIT into account.
			 */
			if (early_pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif /* CONFIG_SPARSEMEM */