/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>

#include <asm/bug.h>
#include <asm/io.h>

#include "internal.h"

#ifndef CONFIG_HAVE_MEMBLOCK
#error CONFIG_HAVE_MEMBLOCK not defined
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;
	ulong flags = choose_memblock_flags();

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

again:
	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
					   flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem-allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}
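
/*
 * Usage sketch (hypothetical, not a real caller in this file): how the
 * public wrappers below drive this helper.  The size, alignment and
 * goal values are made up for illustration.
 */
static void __init __maybe_unused example_core_early_usage(void)
{
	void *p;

	/* Prefer memory above 16 MiB; accept any node, no upper limit. */
	p = __alloc_memory_core_early(NUMA_NO_NODE, PAGE_SIZE, PAGE_SIZE,
				      16ULL << 20, -1ULL);
	if (!p)
		pr_warn("example: boot-time allocation failed\n");
}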

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part_phys(addr, size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
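
/*
 * Usage sketch (hypothetical caller): once the bootmem allocator has
 * been torn down, a region that turns out to be unneeded can still be
 * handed back page by page.  The address and size are made up.
 */
static void __init __maybe_unused example_free_late(void)
{
	unsigned long example_phys = 0x1000000;		/* hypothetical */
	unsigned long example_size = 4 * PAGE_SIZE;	/* hypothetical */

	/* Only whole pages inside the range reach the page allocator. */
	free_bootmem_late(example_phys, example_size);
}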

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		__free_pages_bootmem(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}
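
/*
 * Worked example for __free_pages_memory(): with start == 5 and
 * end == 23 (and the usual MAX_ORDER), __ffs() plus the end check pick
 * the largest naturally aligned blocks that fit, so the PFNs are freed
 * as
 *
 *	5 (order 0), 6-7 (order 1), 8-15 (order 3),
 *	16-19 (order 2), 20-21 (order 1), 22 (order 0)
 *
 * which keeps the number of calls into the buddy allocator small.
 */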

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We have to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, such as Node 0 having no RAM installed,
	 * low RAM will actually be on Node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages += pages;

	return pages;
}
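
/*
 * Usage sketch (hypothetical caller): an architecture's mem_init()
 * would typically make this call exactly once to hand all remaining
 * free boot memory over to the buddy allocator.
 */
static void __init __maybe_unused example_mem_init(void)
{
	unsigned long released = free_all_bootmem();

	pr_info("example: released %lu pages to the page allocator\n",
		released);
}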

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	memblock_free(addr, size);
}
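
/*
 * Usage sketch (hypothetical caller): returning a range that was
 * reserved earlier during boot.  On this implementation both helpers
 * above are thin wrappers around memblock_free(); the values are made
 * up.
 */
static void __init __maybe_unused example_free_bootmem(void)
{
	unsigned long example_phys = 0x2000000;	/* hypothetical */

	free_bootmem(example_phys, PAGE_SIZE);
}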

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
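
/*
 * Usage sketch (hypothetical caller): the nopanic variant is meant for
 * callers that can degrade gracefully, so checking for NULL is
 * mandatory.
 */
static void __init __maybe_unused example_alloc_nopanic(void)
{
	void *buf = __alloc_bootmem_nopanic(PAGE_SIZE, SMP_CACHE_BYTES, 0);

	if (!buf)
		pr_warn("example: optional buffer not available\n");
}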

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
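
/*
 * Usage sketch (hypothetical caller): early code that cannot continue
 * without the memory uses the panicking variant and skips error
 * handling; the returned memory is already zeroed.
 */
static void __init __maybe_unused example_alloc_or_panic(void)
{
	unsigned long *table;

	/* Panics on failure, so the return value is always valid. */
	table = __alloc_bootmem(1024 * sizeof(*table), SMP_CACHE_BYTES, 0);
	table[0] = 1;
}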

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size,
				unsigned long align,
				unsigned long goal,
				unsigned long limit)
{
	void *ptr;

again:
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
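
/*
 * Usage sketch (hypothetical caller): per-node data is placed close to
 * the node it describes, with a silent fallback to other nodes if the
 * requested one cannot satisfy the allocation.
 */
static void __init __maybe_unused example_alloc_on_node(void)
{
	void *nodedata = __alloc_bootmem_node(NODE_DATA(0), PAGE_SIZE,
					      SMP_CACHE_BYTES, 0);

	(void)nodedata;	/* panics on failure, so always valid here */
}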

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
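
/*
 * Usage sketch (hypothetical caller): buffers that must stay reachable
 * by legacy devices are capped at ARCH_LOW_ADDRESS_LIMIT through the
 * _low variant.
 */
static void __init __maybe_unused example_alloc_low(void)
{
	void *dma_buf = __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);

	(void)dma_buf;	/* guaranteed below ARCH_LOW_ADDRESS_LIMIT */
}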

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal,
				     ARCH_LOW_ADDRESS_LIMIT);
}
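
/*
 * Usage sketch (hypothetical caller): the node-aware _low variant
 * combines both constraints: node locality if possible, and an address
 * below ARCH_LOW_ADDRESS_LIMIT in any case.
 */
static void __init __maybe_unused example_alloc_low_on_node(void)
{
	void *buf = __alloc_bootmem_low_node(NODE_DATA(0), PAGE_SIZE,
					     PAGE_SIZE, 0);

	(void)buf;	/* panics on failure, so always valid here */
}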
437 |