1 | /* |
2 | * Procedures for maintaining information about logical memory blocks. |
3 | * |
4 | * Peter Bergner, IBM Corp. June 2001. |
5 | * Copyright (C) 2001 Peter Bergner. |
6 | * |
7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License |
9 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. |
11 | */ |
12 | |
13 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/init.h> |
16 | #include <linux/bitops.h> |
17 | #include <linux/poison.h> |
18 | #include <linux/pfn.h> |
19 | #include <linux/debugfs.h> |
20 | #include <linux/seq_file.h> |
21 | #include <linux/memblock.h> |
22 | |
23 | #include <asm/sections.h> |
24 | #include <linux/io.h> |
25 | |
26 | #include "internal.h" |
27 | |
28 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
29 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
30 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
31 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; |
32 | #endif |
33 | |
34 | struct memblock memblock __initdata_memblock = { |
35 | .memory.regions = memblock_memory_init_regions, |
36 | .memory.cnt = 1, /* empty dummy entry */ |
37 | .memory.max = INIT_MEMBLOCK_REGIONS, |
38 | |
39 | .reserved.regions = memblock_reserved_init_regions, |
40 | .reserved.cnt = 1, /* empty dummy entry */ |
41 | .reserved.max = INIT_MEMBLOCK_REGIONS, |
42 | |
43 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
44 | .physmem.regions = memblock_physmem_init_regions, |
45 | .physmem.cnt = 1, /* empty dummy entry */ |
46 | .physmem.max = INIT_PHYSMEM_REGIONS, |
47 | #endif |
48 | |
49 | .bottom_up = false, |
50 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, |
51 | }; |
52 | |
53 | int memblock_debug __initdata_memblock; |
54 | #ifdef CONFIG_MOVABLE_NODE |
55 | bool movable_node_enabled __initdata_memblock = false; |
56 | #endif |
57 | static bool system_has_some_mirror __initdata_memblock = false; |
58 | static int memblock_can_resize __initdata_memblock; |
59 | static int memblock_memory_in_slab __initdata_memblock = 0; |
60 | static int memblock_reserved_in_slab __initdata_memblock = 0; |
61 | |
62 | ulong __init_memblock choose_memblock_flags(void) |
63 | { |
64 | return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE; |
65 | } |
66 | |
67 | /* inline so we don't get a warning when pr_debug is compiled out */ |
68 | static __init_memblock const char * |
69 | memblock_type_name(struct memblock_type *type) |
70 | { |
71 | if (type == &memblock.memory) |
72 | return "memory"; |
73 | else if (type == &memblock.reserved) |
74 | return "reserved"; |
75 | else |
76 | return "unknown"; |
77 | } |
78 | |
79 | /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ |
80 | static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) |
81 | { |
82 | return *size = min(*size, (phys_addr_t)ULLONG_MAX - base); |
83 | } |
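
/*
 * Worked example (illustrative values, not from the original file): on a
 * 64-bit build, a request of base = 0xfffffffffffff000 with *size = 0x2000
 * would wrap past the end of the physical address space;
 * memblock_cap_size() trims *size to ULLONG_MAX - base = 0xfff so that
 * base + *size no longer overflows.
 */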
84 | |
85 | /* |
86 | * Address comparison utilities |
87 | */ |
88 | static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, |
89 | phys_addr_t base2, phys_addr_t size2) |
90 | { |
91 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); |
92 | } |
93 | |
94 | bool __init_memblock memblock_overlaps_region(struct memblock_type *type, |
95 | phys_addr_t base, phys_addr_t size) |
96 | { |
97 | unsigned long i; |
98 | |
99 | for (i = 0; i < type->cnt; i++) |
100 | if (memblock_addrs_overlap(base, size, type->regions[i].base, |
101 | type->regions[i].size)) |
102 | break; |
103 | return i < type->cnt; |
104 | } |
105 | |
106 | /** |
107 | * __memblock_find_range_bottom_up - find free area utility in bottom-up |
108 | * @start: start of candidate range |
109 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} |
110 | * @size: size of free area to find |
111 | * @align: alignment of free area to find |
112 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
113 | * @flags: pick from blocks based on memory attributes |
114 | * |
115 | * Utility called from memblock_find_in_range_node(), find free area bottom-up. |
116 | * |
117 | * RETURNS: |
118 | * Found address on success, 0 on failure. |
119 | */ |
120 | static phys_addr_t __init_memblock |
121 | __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, |
122 | phys_addr_t size, phys_addr_t align, int nid, |
123 | ulong flags) |
124 | { |
125 | phys_addr_t this_start, this_end, cand; |
126 | u64 i; |
127 | |
128 | for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { |
129 | this_start = clamp(this_start, start, end); |
130 | this_end = clamp(this_end, start, end); |
131 | |
132 | cand = round_up(this_start, align); |
133 | if (cand < this_end && this_end - cand >= size) |
134 | return cand; |
135 | } |
136 | |
137 | return 0; |
138 | } |
139 | |
140 | /** |
141 | * __memblock_find_range_top_down - find free area utility, in top-down |
142 | * @start: start of candidate range |
143 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} |
144 | * @size: size of free area to find |
145 | * @align: alignment of free area to find |
146 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
147 | * @flags: pick from blocks based on memory attributes |
148 | * |
149 | * Utility called from memblock_find_in_range_node(), find free area top-down. |
150 | * |
151 | * RETURNS: |
152 | * Found address on success, 0 on failure. |
153 | */ |
154 | static phys_addr_t __init_memblock |
155 | __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, |
156 | phys_addr_t size, phys_addr_t align, int nid, |
157 | ulong flags) |
158 | { |
159 | phys_addr_t this_start, this_end, cand; |
160 | u64 i; |
161 | |
162 | for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, |
163 | NULL) { |
164 | this_start = clamp(this_start, start, end); |
165 | this_end = clamp(this_end, start, end); |
166 | |
167 | if (this_end < size) |
168 | continue; |
169 | |
170 | cand = round_down(this_end - size, align); |
171 | if (cand >= this_start) |
172 | return cand; |
173 | } |
174 | |
175 | return 0; |
176 | } |
177 | |
178 | /** |
179 | * memblock_find_in_range_node - find free area in given range and node |
180 | * @size: size of free area to find |
181 | * @align: alignment of free area to find |
182 | * @start: start of candidate range |
183 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} |
184 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
185 | * @flags: pick from blocks based on memory attributes |
186 | * |
187 | * Find @size free area aligned to @align in the specified range and node. |
188 | * |
189 | * When allocation direction is bottom-up, the @start should be greater |
190 | * than the end of the kernel image. Otherwise, it will be trimmed. The |
191 | * reason is that we want the bottom-up allocation just near the kernel |
192 | * image so it is highly likely that the allocated memory and the kernel |
193 | * will reside in the same node. |
194 | * |
195 |  * If bottom-up allocation fails, allocation falls back to top-down. |
196 | * |
197 | * RETURNS: |
198 | * Found address on success, 0 on failure. |
199 | */ |
200 | phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, |
201 | phys_addr_t align, phys_addr_t start, |
202 | phys_addr_t end, int nid, ulong flags) |
203 | { |
204 | phys_addr_t kernel_end, ret; |
205 | |
206 | /* pump up @end */ |
207 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) |
208 | end = memblock.current_limit; |
209 | |
210 | /* avoid allocating the first page */ |
211 | start = max_t(phys_addr_t, start, PAGE_SIZE); |
212 | end = max(start, end); |
213 | kernel_end = __pa_symbol(_end); |
214 | |
215 | /* |
216 | * try bottom-up allocation only when bottom-up mode |
217 | * is set and @end is above the kernel image. |
218 | */ |
219 | if (memblock_bottom_up() && end > kernel_end) { |
220 | phys_addr_t bottom_up_start; |
221 | |
222 | /* make sure we will allocate above the kernel */ |
223 | bottom_up_start = max(start, kernel_end); |
224 | |
225 | /* ok, try bottom-up allocation first */ |
226 | ret = __memblock_find_range_bottom_up(bottom_up_start, end, |
227 | size, align, nid, flags); |
228 | if (ret) |
229 | return ret; |
230 | |
231 | /* |
232 | * we always limit bottom-up allocation above the kernel, |
233 | * but top-down allocation doesn't have the limit, so |
234 | * retrying top-down allocation may succeed when bottom-up |
235 | * allocation failed. |
236 | * |
237 | 		 * bottom-up allocation is expected to fail very rarely, |
238 | 		 * so we use WARN_ONCE() here to see the stack trace if |
239 | 		 * such a failure happens. |
240 | */ |
241 | WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); |
242 | } |
243 | |
244 | return __memblock_find_range_top_down(start, end, size, align, nid, |
245 | flags); |
246 | } |
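
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * constants are hypothetical): find 1 MiB, 2 MiB-aligned, anywhere below
 * the current limit, on any node, honouring the mirror preference:
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(SZ_1M, SZ_2M, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE,
 *					   choose_memblock_flags());
 *
 * A return value of 0 means nothing suitable was found.  Note that this
 * only finds a candidate range; the caller still has to
 * memblock_reserve() it, as memblock_alloc_range_nid() below does.
 */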
247 | |
248 | /** |
249 | * memblock_find_in_range - find free area in given range |
250 | * @start: start of candidate range |
251 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} |
252 | * @size: size of free area to find |
253 | * @align: alignment of free area to find |
254 | * |
255 | * Find @size free area aligned to @align in the specified range. |
256 | * |
257 | * RETURNS: |
258 | * Found address on success, 0 on failure. |
259 | */ |
260 | phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, |
261 | phys_addr_t end, phys_addr_t size, |
262 | phys_addr_t align) |
263 | { |
264 | phys_addr_t ret; |
265 | ulong flags = choose_memblock_flags(); |
266 | |
267 | again: |
268 | ret = memblock_find_in_range_node(size, align, start, end, |
269 | NUMA_NO_NODE, flags); |
270 | |
271 | if (!ret && (flags & MEMBLOCK_MIRROR)) { |
272 | pr_warn("Could not allocate %pap bytes of mirrored memory\n", |
273 | &size); |
274 | flags &= ~MEMBLOCK_MIRROR; |
275 | goto again; |
276 | } |
277 | |
278 | return ret; |
279 | } |
280 | |
281 | static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) |
282 | { |
283 | type->total_size -= type->regions[r].size; |
284 | memmove(&type->regions[r], &type->regions[r + 1], |
285 | (type->cnt - (r + 1)) * sizeof(type->regions[r])); |
286 | type->cnt--; |
287 | |
288 | /* Special case for empty arrays */ |
289 | if (type->cnt == 0) { |
290 | WARN_ON(type->total_size != 0); |
291 | type->cnt = 1; |
292 | type->regions[0].base = 0; |
293 | type->regions[0].size = 0; |
294 | type->regions[0].flags = 0; |
295 | memblock_set_region_node(&type->regions[0], MAX_NUMNODES); |
296 | } |
297 | } |
298 | |
299 | #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK |
300 | /** |
301 |  * memblock_discard - discard memory and reserved arrays if they were allocated |
302 | */ |
303 | void __init memblock_discard(void) |
304 | { |
305 | phys_addr_t addr, size; |
306 | |
307 | if (memblock.reserved.regions != memblock_reserved_init_regions) { |
308 | addr = __pa(memblock.reserved.regions); |
309 | size = PAGE_ALIGN(sizeof(struct memblock_region) * |
310 | memblock.reserved.max); |
311 | __memblock_free_late(addr, size); |
312 | } |
313 | |
314 | if (memblock.memory.regions != memblock_memory_init_regions) { |
315 | addr = __pa(memblock.memory.regions); |
316 | size = PAGE_ALIGN(sizeof(struct memblock_region) * |
317 | memblock.memory.max); |
318 | __memblock_free_late(addr, size); |
319 | } |
320 | } |
321 | #endif |
322 | |
323 | /** |
324 | * memblock_double_array - double the size of the memblock regions array |
325 | * @type: memblock type of the regions array being doubled |
326 | * @new_area_start: starting address of memory range to avoid overlap with |
327 | * @new_area_size: size of memory range to avoid overlap with |
328 | * |
329 | * Double the size of the @type regions array. If memblock is being used to |
330 | * allocate memory for a new reserved regions array and there is a previously |
331 |  * allocated memory range [@new_area_start,@new_area_start+@new_area_size) |
332 | * waiting to be reserved, ensure the memory used by the new array does |
333 | * not overlap. |
334 | * |
335 | * RETURNS: |
336 | * 0 on success, -1 on failure. |
337 | */ |
338 | static int __init_memblock memblock_double_array(struct memblock_type *type, |
339 | phys_addr_t new_area_start, |
340 | phys_addr_t new_area_size) |
341 | { |
342 | struct memblock_region *new_array, *old_array; |
343 | phys_addr_t old_alloc_size, new_alloc_size; |
344 | phys_addr_t old_size, new_size, addr; |
345 | int use_slab = slab_is_available(); |
346 | int *in_slab; |
347 | |
348 | /* We don't allow resizing until we know about the reserved regions |
349 | * of memory that aren't suitable for allocation |
350 | */ |
351 | if (!memblock_can_resize) |
352 | return -1; |
353 | |
354 | /* Calculate new doubled size */ |
355 | old_size = type->max * sizeof(struct memblock_region); |
356 | new_size = old_size << 1; |
357 | /* |
358 | 	 * We need to allocate the new array aligned to PAGE_SIZE, |
359 | 	 * so we can free it completely later. |
360 | */ |
361 | old_alloc_size = PAGE_ALIGN(old_size); |
362 | new_alloc_size = PAGE_ALIGN(new_size); |
363 | |
364 | /* Retrieve the slab flag */ |
365 | if (type == &memblock.memory) |
366 | in_slab = &memblock_memory_in_slab; |
367 | else |
368 | in_slab = &memblock_reserved_in_slab; |
369 | |
370 | /* Try to find some space for it. |
371 | * |
372 | * WARNING: We assume that either slab_is_available() and we use it or |
373 | * we use MEMBLOCK for allocations. That means that this is unsafe to |
374 | * use when bootmem is currently active (unless bootmem itself is |
375 | * implemented on top of MEMBLOCK which isn't the case yet) |
376 | * |
377 | * This should however not be an issue for now, as we currently only |
378 | * call into MEMBLOCK while it's still active, or much later when slab |
379 | * is active for memory hotplug operations |
380 | */ |
381 | if (use_slab) { |
382 | new_array = kmalloc(new_size, GFP_KERNEL); |
383 | addr = new_array ? __pa(new_array) : 0; |
384 | } else { |
385 | /* only exclude range when trying to double reserved.regions */ |
386 | if (type != &memblock.reserved) |
387 | new_area_start = new_area_size = 0; |
388 | |
389 | addr = memblock_find_in_range(new_area_start + new_area_size, |
390 | memblock.current_limit, |
391 | new_alloc_size, PAGE_SIZE); |
392 | if (!addr && new_area_size) |
393 | addr = memblock_find_in_range(0, |
394 | min(new_area_start, memblock.current_limit), |
395 | new_alloc_size, PAGE_SIZE); |
396 | |
397 | new_array = addr ? __va(addr) : NULL; |
398 | } |
399 | if (!addr) { |
400 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", |
401 | memblock_type_name(type), type->max, type->max * 2); |
402 | return -1; |
403 | } |
404 | |
405 | 	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n", |
406 | memblock_type_name(type), type->max * 2, (u64)addr, |
407 | (u64)addr + new_size - 1); |
408 | |
409 | /* |
410 | * Found space, we now need to move the array over before we add the |
411 | * reserved region since it may be our reserved array itself that is |
412 | * full. |
413 | */ |
414 | memcpy(new_array, type->regions, old_size); |
415 | memset(new_array + type->max, 0, old_size); |
416 | old_array = type->regions; |
417 | type->regions = new_array; |
418 | type->max <<= 1; |
419 | |
420 | /* Free old array. We needn't free it if the array is the static one */ |
421 | if (*in_slab) |
422 | kfree(old_array); |
423 | else if (old_array != memblock_memory_init_regions && |
424 | old_array != memblock_reserved_init_regions) |
425 | memblock_free(__pa(old_array), old_alloc_size); |
426 | |
427 | /* |
428 | * Reserve the new array if that comes from the memblock. Otherwise, we |
429 | * needn't do it |
430 | */ |
431 | if (!use_slab) |
432 | BUG_ON(memblock_reserve(addr, new_alloc_size)); |
433 | |
434 | /* Update slab flag */ |
435 | *in_slab = use_slab; |
436 | |
437 | return 0; |
438 | } |
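
/*
 * Worked example (illustrative, assuming the default INIT_MEMBLOCK_REGIONS
 * of 128 entries and a 32-byte struct memblock_region): the static array
 * occupies 4 KiB, so the first doubling asks for 256 entries, i.e. 8 KiB
 * after PAGE_ALIGN(), copies the old 4 KiB across, and then simply skips
 * freeing the old array because it is the static __initdata_memblock one.
 */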
439 | |
440 | /** |
441 | * memblock_merge_regions - merge neighboring compatible regions |
442 | * @type: memblock type to scan |
443 | * |
444 | * Scan @type and merge neighboring compatible regions. |
445 | */ |
446 | static void __init_memblock memblock_merge_regions(struct memblock_type *type) |
447 | { |
448 | int i = 0; |
449 | |
450 | /* cnt never goes below 1 */ |
451 | while (i < type->cnt - 1) { |
452 | struct memblock_region *this = &type->regions[i]; |
453 | struct memblock_region *next = &type->regions[i + 1]; |
454 | |
455 | if (this->base + this->size != next->base || |
456 | memblock_get_region_node(this) != |
457 | memblock_get_region_node(next) || |
458 | this->flags != next->flags) { |
459 | BUG_ON(this->base + this->size > next->base); |
460 | i++; |
461 | continue; |
462 | } |
463 | |
464 | this->size += next->size; |
465 | /* move forward from next + 1, index of which is i + 2 */ |
466 | memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); |
467 | type->cnt--; |
468 | } |
469 | } |
470 | |
471 | /** |
472 | * memblock_insert_region - insert new memblock region |
473 | * @type: memblock type to insert into |
474 | * @idx: index for the insertion point |
475 | * @base: base address of the new region |
476 | * @size: size of the new region |
477 | * @nid: node id of the new region |
478 | * @flags: flags of the new region |
479 | * |
480 | * Insert new memblock region [@base,@base+@size) into @type at @idx. |
481 | * @type must already have extra room to accommodate the new region. |
482 | */ |
483 | static void __init_memblock memblock_insert_region(struct memblock_type *type, |
484 | int idx, phys_addr_t base, |
485 | phys_addr_t size, |
486 | int nid, unsigned long flags) |
487 | { |
488 | struct memblock_region *rgn = &type->regions[idx]; |
489 | |
490 | BUG_ON(type->cnt >= type->max); |
491 | memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); |
492 | rgn->base = base; |
493 | rgn->size = size; |
494 | rgn->flags = flags; |
495 | memblock_set_region_node(rgn, nid); |
496 | type->cnt++; |
497 | type->total_size += size; |
498 | } |
499 | |
500 | /** |
501 | * memblock_add_range - add new memblock region |
502 | * @type: memblock type to add new region into |
503 | * @base: base address of the new region |
504 | * @size: size of the new region |
505 | * @nid: nid of the new region |
506 | * @flags: flags of the new region |
507 | * |
508 | * Add new memblock region [@base,@base+@size) into @type. The new region |
509 | * is allowed to overlap with existing ones - overlaps don't affect already |
510 | * existing regions. @type is guaranteed to be minimal (all neighbouring |
511 | * compatible regions are merged) after the addition. |
512 | * |
513 | * RETURNS: |
514 | * 0 on success, -errno on failure. |
515 | */ |
516 | int __init_memblock memblock_add_range(struct memblock_type *type, |
517 | phys_addr_t base, phys_addr_t size, |
518 | int nid, unsigned long flags) |
519 | { |
520 | bool insert = false; |
521 | phys_addr_t obase = base; |
522 | phys_addr_t end = base + memblock_cap_size(base, &size); |
523 | int idx, nr_new; |
524 | struct memblock_region *rgn; |
525 | |
526 | if (!size) |
527 | return 0; |
528 | |
529 | /* special case for empty array */ |
530 | if (type->regions[0].size == 0) { |
531 | WARN_ON(type->cnt != 1 || type->total_size); |
532 | type->regions[0].base = base; |
533 | type->regions[0].size = size; |
534 | type->regions[0].flags = flags; |
535 | memblock_set_region_node(&type->regions[0], nid); |
536 | type->total_size = size; |
537 | return 0; |
538 | } |
539 | repeat: |
540 | /* |
541 | * The following is executed twice. Once with %false @insert and |
542 | * then with %true. The first counts the number of regions needed |
543 | * to accommodate the new area. The second actually inserts them. |
544 | */ |
545 | base = obase; |
546 | nr_new = 0; |
547 | |
548 | for_each_memblock_type(type, rgn) { |
549 | phys_addr_t rbase = rgn->base; |
550 | phys_addr_t rend = rbase + rgn->size; |
551 | |
552 | if (rbase >= end) |
553 | break; |
554 | if (rend <= base) |
555 | continue; |
556 | /* |
557 | * @rgn overlaps. If it separates the lower part of new |
558 | * area, insert that portion. |
559 | */ |
560 | if (rbase > base) { |
561 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
562 | WARN_ON(nid != memblock_get_region_node(rgn)); |
563 | #endif |
564 | WARN_ON(flags != rgn->flags); |
565 | nr_new++; |
566 | if (insert) |
567 | memblock_insert_region(type, idx++, base, |
568 | rbase - base, nid, |
569 | flags); |
570 | } |
571 | /* area below @rend is dealt with, forget about it */ |
572 | base = min(rend, end); |
573 | } |
574 | |
575 | /* insert the remaining portion */ |
576 | if (base < end) { |
577 | nr_new++; |
578 | if (insert) |
579 | memblock_insert_region(type, idx, base, end - base, |
580 | nid, flags); |
581 | } |
582 | |
583 | if (!nr_new) |
584 | return 0; |
585 | |
586 | /* |
587 | * If this was the first round, resize array and repeat for actual |
588 | * insertions; otherwise, merge and return. |
589 | */ |
590 | if (!insert) { |
591 | while (type->cnt + nr_new > type->max) |
592 | if (memblock_double_array(type, obase, size) < 0) |
593 | return -ENOMEM; |
594 | insert = true; |
595 | goto repeat; |
596 | } else { |
597 | memblock_merge_regions(type); |
598 | return 0; |
599 | } |
600 | } |
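
/*
 * Worked example of the two-pass insert above (illustrative addresses):
 * with existing regions [0x1000-0x2000) and [0x3000-0x4000), adding
 * [0x1800-0x3800) only needs one new entry, for the gap [0x2000-0x3000).
 * The first (counting) pass finds nr_new = 1 and grows the array if
 * required, the second pass inserts the entry, and
 * memblock_merge_regions() then collapses the three now-adjacent,
 * compatible regions into a single [0x1000-0x4000) one.
 */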
601 | |
602 | int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, |
603 | int nid) |
604 | { |
605 | return memblock_add_range(&memblock.memory, base, size, nid, 0); |
606 | } |
607 | |
608 | int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) |
609 | { |
610 | memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n", |
611 | (unsigned long long)base, |
612 | (unsigned long long)base + size - 1, |
613 | 0UL, (void *)_RET_IP_); |
614 | |
615 | return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); |
616 | } |
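
/*
 * Typical use (illustrative sketch, not from this file; "base", "size",
 * "initrd_start_phys" and "initrd_size" are placeholders for values that
 * the architecture reads from the device tree, e820, etc.): early setup
 * code registers each RAM bank and then reserves the ranges that must not
 * be handed out, e.g. the kernel image and the initrd:
 *
 *	memblock_add(base, size);
 *	memblock_reserve(__pa(_stext), _end - _stext);
 *	memblock_reserve(initrd_start_phys, initrd_size);
 */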
617 | |
618 | /** |
619 | * memblock_isolate_range - isolate given range into disjoint memblocks |
620 | * @type: memblock type to isolate range for |
621 | * @base: base of range to isolate |
622 | * @size: size of range to isolate |
623 | * @start_rgn: out parameter for the start of isolated region |
624 | * @end_rgn: out parameter for the end of isolated region |
625 | * |
626 | * Walk @type and ensure that regions don't cross the boundaries defined by |
627 | * [@base,@base+@size). Crossing regions are split at the boundaries, |
628 | * which may create at most two more regions. The index of the first |
629 | * region inside the range is returned in *@start_rgn and end in *@end_rgn. |
630 | * |
631 | * RETURNS: |
632 | * 0 on success, -errno on failure. |
633 | */ |
634 | static int __init_memblock memblock_isolate_range(struct memblock_type *type, |
635 | phys_addr_t base, phys_addr_t size, |
636 | int *start_rgn, int *end_rgn) |
637 | { |
638 | phys_addr_t end = base + memblock_cap_size(base, &size); |
639 | int idx; |
640 | struct memblock_region *rgn; |
641 | |
642 | *start_rgn = *end_rgn = 0; |
643 | |
644 | if (!size) |
645 | return 0; |
646 | |
647 | /* we'll create at most two more regions */ |
648 | while (type->cnt + 2 > type->max) |
649 | if (memblock_double_array(type, base, size) < 0) |
650 | return -ENOMEM; |
651 | |
652 | for_each_memblock_type(type, rgn) { |
653 | phys_addr_t rbase = rgn->base; |
654 | phys_addr_t rend = rbase + rgn->size; |
655 | |
656 | if (rbase >= end) |
657 | break; |
658 | if (rend <= base) |
659 | continue; |
660 | |
661 | if (rbase < base) { |
662 | /* |
663 | * @rgn intersects from below. Split and continue |
664 | * to process the next region - the new top half. |
665 | */ |
666 | rgn->base = base; |
667 | rgn->size -= base - rbase; |
668 | type->total_size -= base - rbase; |
669 | memblock_insert_region(type, idx, rbase, base - rbase, |
670 | memblock_get_region_node(rgn), |
671 | rgn->flags); |
672 | } else if (rend > end) { |
673 | /* |
674 | * @rgn intersects from above. Split and redo the |
675 | * current region - the new bottom half. |
676 | */ |
677 | rgn->base = end; |
678 | rgn->size -= end - rbase; |
679 | type->total_size -= end - rbase; |
680 | memblock_insert_region(type, idx--, rbase, end - rbase, |
681 | memblock_get_region_node(rgn), |
682 | rgn->flags); |
683 | } else { |
684 | /* @rgn is fully contained, record it */ |
685 | if (!*end_rgn) |
686 | *start_rgn = idx; |
687 | *end_rgn = idx + 1; |
688 | } |
689 | } |
690 | |
691 | return 0; |
692 | } |
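
/*
 * Worked example (illustrative addresses): isolating [0x1800-0x2800) out
 * of a single region [0x1000-0x3000) splits it into [0x1000-0x1800),
 * [0x1800-0x2800) and [0x2800-0x3000); *start_rgn and *end_rgn then
 * bracket only the middle entry, so callers such as
 * memblock_remove_range() or memblock_setclr_flag() operate on exactly
 * the requested range.
 */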
693 | |
694 | static int __init_memblock memblock_remove_range(struct memblock_type *type, |
695 | phys_addr_t base, phys_addr_t size) |
696 | { |
697 | int start_rgn, end_rgn; |
698 | int i, ret; |
699 | |
700 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
701 | if (ret) |
702 | return ret; |
703 | |
704 | for (i = end_rgn - 1; i >= start_rgn; i--) |
705 | memblock_remove_region(type, i); |
706 | return 0; |
707 | } |
708 | |
709 | int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) |
710 | { |
711 | return memblock_remove_range(&memblock.memory, base, size); |
712 | } |
713 | |
714 | |
715 | int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) |
716 | { |
717 | memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", |
718 | (unsigned long long)base, |
719 | (unsigned long long)base + size - 1, |
720 | (void *)_RET_IP_); |
721 | |
722 | kmemleak_free_part_phys(base, size); |
723 | return memblock_remove_range(&memblock.reserved, base, size); |
724 | } |
725 | |
726 | int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
727 | { |
728 | memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n", |
729 | (unsigned long long)base, |
730 | (unsigned long long)base + size - 1, |
731 | 0UL, (void *)_RET_IP_); |
732 | |
733 | return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); |
734 | } |
735 | |
736 | /** |
737 |  * memblock_setclr_flag - isolate the given range and set or clear @flag on it |
738 |  * This function isolates the region [@base, @base + @size) and sets/clears @flag. |
739 | * |
740 | * Return 0 on success, -errno on failure. |
741 | */ |
742 | static int __init_memblock memblock_setclr_flag(phys_addr_t base, |
743 | phys_addr_t size, int set, int flag) |
744 | { |
745 | struct memblock_type *type = &memblock.memory; |
746 | int i, ret, start_rgn, end_rgn; |
747 | |
748 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
749 | if (ret) |
750 | return ret; |
751 | |
752 | for (i = start_rgn; i < end_rgn; i++) |
753 | if (set) |
754 | memblock_set_region_flags(&type->regions[i], flag); |
755 | else |
756 | memblock_clear_region_flags(&type->regions[i], flag); |
757 | |
758 | memblock_merge_regions(type); |
759 | return 0; |
760 | } |
761 | |
762 | /** |
763 | * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. |
764 | * @base: the base phys addr of the region |
765 | * @size: the size of the region |
766 | * |
767 | * Return 0 on success, -errno on failure. |
768 | */ |
769 | int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) |
770 | { |
771 | return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); |
772 | } |
773 | |
774 | /** |
775 | * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. |
776 | * @base: the base phys addr of the region |
777 | * @size: the size of the region |
778 | * |
779 | * Return 0 on success, -errno on failure. |
780 | */ |
781 | int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) |
782 | { |
783 | return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); |
784 | } |
785 | |
786 | /** |
787 | * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. |
788 | * @base: the base phys addr of the region |
789 | * @size: the size of the region |
790 | * |
791 | * Return 0 on success, -errno on failure. |
792 | */ |
793 | int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) |
794 | { |
795 | system_has_some_mirror = true; |
796 | |
797 | return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); |
798 | } |
799 | |
800 | /** |
801 | * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. |
802 | * @base: the base phys addr of the region |
803 | * @size: the size of the region |
804 | * |
805 | * Return 0 on success, -errno on failure. |
806 | */ |
807 | int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) |
808 | { |
809 | return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); |
810 | } |
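
/*
 * Usage sketch (illustrative; "fw_base" and "fw_size" are placeholders):
 * early firmware parsing code can keep a range out of the linear mapping
 * before the page tables are built:
 *
 *	if (memblock_mark_nomap(fw_base, fw_size))
 *		pr_warn("could not mark firmware region nomap\n");
 *
 * Ranges marked this way are skipped by the free-range iterators below
 * unless MEMBLOCK_NOMAP is passed in @flags.
 */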
811 | |
812 | /** |
813 |  * __next_reserved_mem_region - next function for for_each_reserved_mem_region() |
814 | * @idx: pointer to u64 loop variable |
815 | * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL |
816 | * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL |
817 | * |
818 | * Iterate over all reserved memory regions. |
819 | */ |
820 | void __init_memblock __next_reserved_mem_region(u64 *idx, |
821 | phys_addr_t *out_start, |
822 | phys_addr_t *out_end) |
823 | { |
824 | struct memblock_type *type = &memblock.reserved; |
825 | |
826 | if (*idx < type->cnt) { |
827 | struct memblock_region *r = &type->regions[*idx]; |
828 | phys_addr_t base = r->base; |
829 | phys_addr_t size = r->size; |
830 | |
831 | if (out_start) |
832 | *out_start = base; |
833 | if (out_end) |
834 | 			*out_end = base + size; |
835 | |
836 | *idx += 1; |
837 | return; |
838 | } |
839 | |
840 | /* signal end of iteration */ |
841 | *idx = ULLONG_MAX; |
842 | } |
843 | |
844 | /** |
845 |  * __next_mem_range - next function for for_each_free_mem_range() etc. |
846 | * @idx: pointer to u64 loop variable |
847 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
848 | * @flags: pick from blocks based on memory attributes |
849 | * @type_a: pointer to memblock_type from where the range is taken |
850 | * @type_b: pointer to memblock_type which excludes memory from being taken |
851 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
852 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
853 | * @out_nid: ptr to int for nid of the range, can be %NULL |
854 | * |
855 | * Find the first area from *@idx which matches @nid, fill the out |
856 | * parameters, and update *@idx for the next iteration. The lower 32bit of |
857 | * *@idx contains index into type_a and the upper 32bit indexes the |
858 | * areas before each region in type_b. For example, if type_b regions |
859 | * look like the following, |
860 | * |
861 | * 0:[0-16), 1:[32-48), 2:[128-130) |
862 | * |
863 | * The upper 32bit indexes the following regions. |
864 | * |
865 | * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) |
866 | * |
867 | * As both region arrays are sorted, the function advances the two indices |
868 | * in lockstep and returns each intersection. |
869 | */ |
870 | void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags, |
871 | struct memblock_type *type_a, |
872 | struct memblock_type *type_b, |
873 | phys_addr_t *out_start, |
874 | phys_addr_t *out_end, int *out_nid) |
875 | { |
876 | int idx_a = *idx & 0xffffffff; |
877 | int idx_b = *idx >> 32; |
878 | |
879 | if (WARN_ONCE(nid == MAX_NUMNODES, |
880 | "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
881 | nid = NUMA_NO_NODE; |
882 | |
883 | for (; idx_a < type_a->cnt; idx_a++) { |
884 | struct memblock_region *m = &type_a->regions[idx_a]; |
885 | |
886 | phys_addr_t m_start = m->base; |
887 | phys_addr_t m_end = m->base + m->size; |
888 | int m_nid = memblock_get_region_node(m); |
889 | |
890 | /* only memory regions are associated with nodes, check it */ |
891 | if (nid != NUMA_NO_NODE && nid != m_nid) |
892 | continue; |
893 | |
894 | /* skip hotpluggable memory regions if needed */ |
895 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) |
896 | continue; |
897 | |
898 | /* if we want mirror memory skip non-mirror memory regions */ |
899 | if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) |
900 | continue; |
901 | |
902 | /* skip nomap memory unless we were asked for it explicitly */ |
903 | if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) |
904 | continue; |
905 | |
906 | if (!type_b) { |
907 | if (out_start) |
908 | *out_start = m_start; |
909 | if (out_end) |
910 | *out_end = m_end; |
911 | if (out_nid) |
912 | *out_nid = m_nid; |
913 | idx_a++; |
914 | *idx = (u32)idx_a | (u64)idx_b << 32; |
915 | return; |
916 | } |
917 | |
918 | /* scan areas before each reservation */ |
919 | for (; idx_b < type_b->cnt + 1; idx_b++) { |
920 | struct memblock_region *r; |
921 | phys_addr_t r_start; |
922 | phys_addr_t r_end; |
923 | |
924 | r = &type_b->regions[idx_b]; |
925 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
926 | r_end = idx_b < type_b->cnt ? |
927 | r->base : ULLONG_MAX; |
928 | |
929 | /* |
930 | * if idx_b advanced past idx_a, |
931 | * break out to advance idx_a |
932 | */ |
933 | if (r_start >= m_end) |
934 | break; |
935 | /* if the two regions intersect, we're done */ |
936 | if (m_start < r_end) { |
937 | if (out_start) |
938 | *out_start = |
939 | max(m_start, r_start); |
940 | if (out_end) |
941 | *out_end = min(m_end, r_end); |
942 | if (out_nid) |
943 | *out_nid = m_nid; |
944 | /* |
945 | * The region which ends first is |
946 | * advanced for the next iteration. |
947 | */ |
948 | if (m_end <= r_end) |
949 | idx_a++; |
950 | else |
951 | idx_b++; |
952 | *idx = (u32)idx_a | (u64)idx_b << 32; |
953 | return; |
954 | } |
955 | } |
956 | } |
957 | |
958 | /* signal end of iteration */ |
959 | *idx = ULLONG_MAX; |
960 | } |
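
/*
 * Worked example of the cursor encoding above (illustrative values): if a
 * call returns with idx_a = 2 and idx_b = 1, *idx is left holding
 * ((u64)1 << 32) | 2 = 0x0000000100000002, so the next call resumes from
 * memory region 2 and from the gap in front of reserved region 1 and
 * never rescans areas it has already reported.
 */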
961 | |
962 | /** |
963 | * __next_mem_range_rev - generic next function for for_each_*_range_rev() |
964 | * |
965 | * Finds the next range from type_a which is not marked as unsuitable |
966 | * in type_b. |
967 | * |
968 | * @idx: pointer to u64 loop variable |
969 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
970 | * @flags: pick from blocks based on memory attributes |
971 | * @type_a: pointer to memblock_type from where the range is taken |
972 | * @type_b: pointer to memblock_type which excludes memory from being taken |
973 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
974 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
975 | * @out_nid: ptr to int for nid of the range, can be %NULL |
976 | * |
977 | * Reverse of __next_mem_range(). |
978 | */ |
979 | void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags, |
980 | struct memblock_type *type_a, |
981 | struct memblock_type *type_b, |
982 | phys_addr_t *out_start, |
983 | phys_addr_t *out_end, int *out_nid) |
984 | { |
985 | int idx_a = *idx & 0xffffffff; |
986 | int idx_b = *idx >> 32; |
987 | |
988 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
989 | nid = NUMA_NO_NODE; |
990 | |
991 | if (*idx == (u64)ULLONG_MAX) { |
992 | idx_a = type_a->cnt - 1; |
993 | if (type_b != NULL) |
994 | idx_b = type_b->cnt; |
995 | else |
996 | idx_b = 0; |
997 | } |
998 | |
999 | for (; idx_a >= 0; idx_a--) { |
1000 | struct memblock_region *m = &type_a->regions[idx_a]; |
1001 | |
1002 | phys_addr_t m_start = m->base; |
1003 | phys_addr_t m_end = m->base + m->size; |
1004 | int m_nid = memblock_get_region_node(m); |
1005 | |
1006 | /* only memory regions are associated with nodes, check it */ |
1007 | if (nid != NUMA_NO_NODE && nid != m_nid) |
1008 | continue; |
1009 | |
1010 | /* skip hotpluggable memory regions if needed */ |
1011 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) |
1012 | continue; |
1013 | |
1014 | /* if we want mirror memory skip non-mirror memory regions */ |
1015 | if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) |
1016 | continue; |
1017 | |
1018 | /* skip nomap memory unless we were asked for it explicitly */ |
1019 | if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) |
1020 | continue; |
1021 | |
1022 | if (!type_b) { |
1023 | if (out_start) |
1024 | *out_start = m_start; |
1025 | if (out_end) |
1026 | *out_end = m_end; |
1027 | if (out_nid) |
1028 | *out_nid = m_nid; |
1029 | idx_a--; |
1030 | *idx = (u32)idx_a | (u64)idx_b << 32; |
1031 | return; |
1032 | } |
1033 | |
1034 | /* scan areas before each reservation */ |
1035 | for (; idx_b >= 0; idx_b--) { |
1036 | struct memblock_region *r; |
1037 | phys_addr_t r_start; |
1038 | phys_addr_t r_end; |
1039 | |
1040 | r = &type_b->regions[idx_b]; |
1041 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
1042 | r_end = idx_b < type_b->cnt ? |
1043 | r->base : ULLONG_MAX; |
1044 | /* |
1045 | * if idx_b advanced past idx_a, |
1046 | * break out to advance idx_a |
1047 | */ |
1048 | |
1049 | if (r_end <= m_start) |
1050 | break; |
1051 | /* if the two regions intersect, we're done */ |
1052 | if (m_end > r_start) { |
1053 | if (out_start) |
1054 | *out_start = max(m_start, r_start); |
1055 | if (out_end) |
1056 | *out_end = min(m_end, r_end); |
1057 | if (out_nid) |
1058 | *out_nid = m_nid; |
1059 | if (m_start >= r_start) |
1060 | idx_a--; |
1061 | else |
1062 | idx_b--; |
1063 | *idx = (u32)idx_a | (u64)idx_b << 32; |
1064 | return; |
1065 | } |
1066 | } |
1067 | } |
1068 | /* signal end of iteration */ |
1069 | *idx = ULLONG_MAX; |
1070 | } |
1071 | |
1072 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
1073 | /* |
1074 |  * Common iterator interface used to define for_each_mem_pfn_range(). |
1075 | */ |
1076 | void __init_memblock __next_mem_pfn_range(int *idx, int nid, |
1077 | unsigned long *out_start_pfn, |
1078 | unsigned long *out_end_pfn, int *out_nid) |
1079 | { |
1080 | struct memblock_type *type = &memblock.memory; |
1081 | struct memblock_region *r; |
1082 | |
1083 | while (++*idx < type->cnt) { |
1084 | r = &type->regions[*idx]; |
1085 | |
1086 | if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) |
1087 | continue; |
1088 | if (nid == MAX_NUMNODES || nid == r->nid) |
1089 | break; |
1090 | } |
1091 | if (*idx >= type->cnt) { |
1092 | *idx = -1; |
1093 | return; |
1094 | } |
1095 | |
1096 | if (out_start_pfn) |
1097 | *out_start_pfn = PFN_UP(r->base); |
1098 | if (out_end_pfn) |
1099 | *out_end_pfn = PFN_DOWN(r->base + r->size); |
1100 | if (out_nid) |
1101 | *out_nid = r->nid; |
1102 | } |
1103 | |
1104 | /** |
1105 | * memblock_set_node - set node ID on memblock regions |
1106 | * @base: base of area to set node ID for |
1107 | * @size: size of area to set node ID for |
1108 | * @type: memblock type to set node ID for |
1109 | * @nid: node ID to set |
1110 | * |
1111 | * Set the nid of memblock @type regions in [@base,@base+@size) to @nid. |
1112 | * Regions which cross the area boundaries are split as necessary. |
1113 | * |
1114 | * RETURNS: |
1115 | * 0 on success, -errno on failure. |
1116 | */ |
1117 | int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, |
1118 | struct memblock_type *type, int nid) |
1119 | { |
1120 | int start_rgn, end_rgn; |
1121 | int i, ret; |
1122 | |
1123 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
1124 | if (ret) |
1125 | return ret; |
1126 | |
1127 | for (i = start_rgn; i < end_rgn; i++) |
1128 | memblock_set_region_node(&type->regions[i], nid); |
1129 | |
1130 | memblock_merge_regions(type); |
1131 | return 0; |
1132 | } |
1133 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
1134 | |
1135 | static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, |
1136 | phys_addr_t align, phys_addr_t start, |
1137 | phys_addr_t end, int nid, ulong flags) |
1138 | { |
1139 | phys_addr_t found; |
1140 | |
1141 | if (!align) |
1142 | align = SMP_CACHE_BYTES; |
1143 | |
1144 | found = memblock_find_in_range_node(size, align, start, end, nid, |
1145 | flags); |
1146 | if (found && !memblock_reserve(found, size)) { |
1147 | /* |
1148 | * The min_count is set to 0 so that memblock allocations are |
1149 | * never reported as leaks. |
1150 | */ |
1151 | kmemleak_alloc_phys(found, size, 0, 0); |
1152 | return found; |
1153 | } |
1154 | return 0; |
1155 | } |
1156 | |
1157 | phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, |
1158 | phys_addr_t start, phys_addr_t end, |
1159 | ulong flags) |
1160 | { |
1161 | return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, |
1162 | flags); |
1163 | } |
1164 | |
1165 | static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, |
1166 | phys_addr_t align, phys_addr_t max_addr, |
1167 | int nid, ulong flags) |
1168 | { |
1169 | return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags); |
1170 | } |
1171 | |
1172 | phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) |
1173 | { |
1174 | ulong flags = choose_memblock_flags(); |
1175 | phys_addr_t ret; |
1176 | |
1177 | again: |
1178 | ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, |
1179 | nid, flags); |
1180 | |
1181 | if (!ret && (flags & MEMBLOCK_MIRROR)) { |
1182 | flags &= ~MEMBLOCK_MIRROR; |
1183 | goto again; |
1184 | } |
1185 | return ret; |
1186 | } |
1187 | |
1188 | phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
1189 | { |
1190 | return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE, |
1191 | MEMBLOCK_NONE); |
1192 | } |
1193 | |
1194 | phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
1195 | { |
1196 | phys_addr_t alloc; |
1197 | |
1198 | alloc = __memblock_alloc_base(size, align, max_addr); |
1199 | |
1200 | if (alloc == 0) |
1201 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", |
1202 | (unsigned long long) size, (unsigned long long) max_addr); |
1203 | |
1204 | return alloc; |
1205 | } |
1206 | |
1207 | phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) |
1208 | { |
1209 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
1210 | } |
1211 | |
1212 | phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
1213 | { |
1214 | phys_addr_t res = memblock_alloc_nid(size, align, nid); |
1215 | |
1216 | if (res) |
1217 | return res; |
1218 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
1219 | } |
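
/*
 * Usage sketch (illustrative): an early table that would prefer node 0
 * but can live anywhere if node 0 cannot satisfy the request:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(SZ_64K, PAGE_SIZE, 0);
 *
 *	if (!pa)
 *		panic("cannot allocate early table\n");
 *
 * The returned address is physical; map it with __va()/phys_to_virt(), or
 * use the memblock_virt_alloc_*() helpers below, which return a zeroed
 * virtual address directly.
 */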
1220 | |
1221 | /** |
1222 | * memblock_virt_alloc_internal - allocate boot memory block |
1223 | * @size: size of memory block to be allocated in bytes |
1224 | * @align: alignment of the region and block's size |
1225 | * @min_addr: the lower bound of the memory region to allocate (phys address) |
1226 | * @max_addr: the upper bound of the memory region to allocate (phys address) |
1227 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1228 | * |
1229 |  * The @min_addr limit is dropped if it cannot be satisfied and the allocation |
1230 |  * will fall back to memory below @min_addr. Also, the allocation may fall back |
1231 |  * to any node in the system if the specified node cannot |
1232 |  * hold the requested memory. |
1233 | * |
1234 | * The allocation is performed from memory region limited by |
1235 | * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. |
1236 | * |
1237 | * The memory block is aligned on SMP_CACHE_BYTES if @align == 0. |
1238 | * |
1239 | * The phys address of allocated boot memory block is converted to virtual and |
1240 | * allocated memory is reset to 0. |
1241 | * |
1242 |  * In addition, the function sets min_count to 0 via kmemleak_alloc() for the |
1243 |  * allocated boot memory block, so that it is never reported as a leak. |
1244 | * |
1245 | * RETURNS: |
1246 | * Virtual address of allocated memory block on success, NULL on failure. |
1247 | */ |
1248 | static void * __init memblock_virt_alloc_internal( |
1249 | phys_addr_t size, phys_addr_t align, |
1250 | phys_addr_t min_addr, phys_addr_t max_addr, |
1251 | int nid) |
1252 | { |
1253 | phys_addr_t alloc; |
1254 | void *ptr; |
1255 | ulong flags = choose_memblock_flags(); |
1256 | |
1257 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
1258 | nid = NUMA_NO_NODE; |
1259 | |
1260 | /* |
1261 | * Detect any accidental use of these APIs after slab is ready, as at |
1262 | * this moment memblock may be deinitialized already and its |
1263 | * internal data may be destroyed (after execution of free_all_bootmem) |
1264 | */ |
1265 | if (WARN_ON_ONCE(slab_is_available())) |
1266 | return kzalloc_node(size, GFP_NOWAIT, nid); |
1267 | |
1268 | if (!align) |
1269 | align = SMP_CACHE_BYTES; |
1270 | |
1271 | if (max_addr > memblock.current_limit) |
1272 | max_addr = memblock.current_limit; |
1273 | |
1274 | again: |
1275 | alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, |
1276 | nid, flags); |
1277 | if (alloc) |
1278 | goto done; |
1279 | |
1280 | if (nid != NUMA_NO_NODE) { |
1281 | alloc = memblock_find_in_range_node(size, align, min_addr, |
1282 | max_addr, NUMA_NO_NODE, |
1283 | flags); |
1284 | if (alloc) |
1285 | goto done; |
1286 | } |
1287 | |
1288 | if (min_addr) { |
1289 | min_addr = 0; |
1290 | goto again; |
1291 | } |
1292 | |
1293 | if (flags & MEMBLOCK_MIRROR) { |
1294 | flags &= ~MEMBLOCK_MIRROR; |
1295 | pr_warn("Could not allocate %pap bytes of mirrored memory\n", |
1296 | &size); |
1297 | goto again; |
1298 | } |
1299 | |
1300 | return NULL; |
1301 | done: |
1302 | memblock_reserve(alloc, size); |
1303 | ptr = phys_to_virt(alloc); |
1304 | memset(ptr, 0, size); |
1305 | |
1306 | /* |
1307 | * The min_count is set to 0 so that bootmem allocated blocks |
1308 | * are never reported as leaks. This is because many of these blocks |
1309 | * are only referred via the physical address which is not |
1310 | * looked up by kmemleak. |
1311 | */ |
1312 | kmemleak_alloc(ptr, size, 0, 0); |
1313 | |
1314 | return ptr; |
1315 | } |
1316 | |
1317 | /** |
1318 | * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block |
1319 | * @size: size of memory block to be allocated in bytes |
1320 | * @align: alignment of the region and block's size |
1321 | * @min_addr: the lower bound of the memory region from where the allocation |
1322 | * is preferred (phys address) |
1323 | * @max_addr: the upper bound of the memory region from where the allocation |
1324 | * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to |
1325 | * allocate only from memory limited by memblock.current_limit value |
1326 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1327 | * |
1328 |  * Public wrapper of memblock_virt_alloc_internal() which provides |
1329 |  * additional debug information (including caller info), if enabled. |
1330 | * |
1331 | * RETURNS: |
1332 | * Virtual address of allocated memory block on success, NULL on failure. |
1333 | */ |
1334 | void * __init memblock_virt_alloc_try_nid_nopanic( |
1335 | phys_addr_t size, phys_addr_t align, |
1336 | phys_addr_t min_addr, phys_addr_t max_addr, |
1337 | int nid) |
1338 | { |
1339 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", |
1340 | __func__, (u64)size, (u64)align, nid, (u64)min_addr, |
1341 | (u64)max_addr, (void *)_RET_IP_); |
1342 | return memblock_virt_alloc_internal(size, align, min_addr, |
1343 | max_addr, nid); |
1344 | } |
1345 | |
1346 | /** |
1347 | * memblock_virt_alloc_try_nid - allocate boot memory block with panicking |
1348 | * @size: size of memory block to be allocated in bytes |
1349 | * @align: alignment of the region and block's size |
1350 | * @min_addr: the lower bound of the memory region from where the allocation |
1351 | * is preferred (phys address) |
1352 | * @max_addr: the upper bound of the memory region from where the allocation |
1353 | * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to |
1354 | * allocate only from memory limited by memblock.current_limit value |
1355 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1356 | * |
1357 |  * Public panicking version of memblock_virt_alloc_try_nid_nopanic() |
1358 | * which provides debug information (including caller info), if enabled, |
1359 | * and panics if the request can not be satisfied. |
1360 | * |
1361 | * RETURNS: |
1362 | * Virtual address of allocated memory block on success, NULL on failure. |
1363 | */ |
1364 | void * __init memblock_virt_alloc_try_nid( |
1365 | phys_addr_t size, phys_addr_t align, |
1366 | phys_addr_t min_addr, phys_addr_t max_addr, |
1367 | int nid) |
1368 | { |
1369 | void *ptr; |
1370 | |
1371 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", |
1372 | __func__, (u64)size, (u64)align, nid, (u64)min_addr, |
1373 | (u64)max_addr, (void *)_RET_IP_); |
1374 | ptr = memblock_virt_alloc_internal(size, align, |
1375 | min_addr, max_addr, nid); |
1376 | if (ptr) |
1377 | return ptr; |
1378 | |
1379 | panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n", |
1380 | __func__, (u64)size, (u64)align, nid, (u64)min_addr, |
1381 | (u64)max_addr); |
1382 | return NULL; |
1383 | } |
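
/*
 * Usage sketch (illustrative; "nr", "nid" and "struct foo" are
 * placeholders, not symbols from this file): allocate a zeroed,
 * preferably node-local array during early init:
 *
 *	struct foo *tbl;
 *
 *	tbl = memblock_virt_alloc_try_nid(nr * sizeof(*tbl),
 *					  SMP_CACHE_BYTES, 0,
 *					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *
 * The panicking variant is for callers that cannot continue booting
 * without the memory; callers with a fallback should use the _nopanic
 * version above instead.
 */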
1384 | |
1385 | /** |
1386 | * __memblock_free_early - free boot memory block |
1387 | * @base: phys starting address of the boot memory block |
1388 | * @size: size of the boot memory block in bytes |
1389 | * |
1390 | * Free boot memory block previously allocated by memblock_virt_alloc_xx() API. |
1391 |  * The freed memory will not be released to the buddy allocator. |
1392 | */ |
1393 | void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) |
1394 | { |
1395 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", |
1396 | __func__, (u64)base, (u64)base + size - 1, |
1397 | (void *)_RET_IP_); |
1398 | kmemleak_free_part_phys(base, size); |
1399 | memblock_remove_range(&memblock.reserved, base, size); |
1400 | } |
1401 | |
1402 | /** |
1403 | * __memblock_free_late - free bootmem block pages directly to buddy allocator |
1404 | * @addr: phys starting address of the boot memory block |
1405 | * @size: size of the boot memory block in bytes |
1406 | * |
1407 | * This is only useful when the bootmem allocator has already been torn |
1408 | * down, but we are still initializing the system. Pages are released directly |
1409 |  * to the buddy allocator; no bootmem metadata is updated because it is gone. |
1410 | */ |
1411 | void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) |
1412 | { |
1413 | u64 cursor, end; |
1414 | |
1415 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", |
1416 | __func__, (u64)base, (u64)base + size - 1, |
1417 | (void *)_RET_IP_); |
1418 | kmemleak_free_part_phys(base, size); |
1419 | cursor = PFN_UP(base); |
1420 | end = PFN_DOWN(base + size); |
1421 | |
1422 | for (; cursor < end; cursor++) { |
1423 | __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); |
1424 | totalram_pages++; |
1425 | } |
1426 | } |
1427 | |
1428 | /* |
1429 | * Remaining API functions |
1430 | */ |
1431 | |
1432 | phys_addr_t __init_memblock memblock_phys_mem_size(void) |
1433 | { |
1434 | return memblock.memory.total_size; |
1435 | } |
1436 | |
1437 | phys_addr_t __init_memblock memblock_reserved_size(void) |
1438 | { |
1439 | return memblock.reserved.total_size; |
1440 | } |
1441 | |
1442 | phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) |
1443 | { |
1444 | unsigned long pages = 0; |
1445 | struct memblock_region *r; |
1446 | unsigned long start_pfn, end_pfn; |
1447 | |
1448 | for_each_memblock(memory, r) { |
1449 | start_pfn = memblock_region_memory_base_pfn(r); |
1450 | end_pfn = memblock_region_memory_end_pfn(r); |
1451 | start_pfn = min_t(unsigned long, start_pfn, limit_pfn); |
1452 | end_pfn = min_t(unsigned long, end_pfn, limit_pfn); |
1453 | pages += end_pfn - start_pfn; |
1454 | } |
1455 | |
1456 | return PFN_PHYS(pages); |
1457 | } |
1458 | |
1459 | /* lowest address */ |
1460 | phys_addr_t __init_memblock memblock_start_of_DRAM(void) |
1461 | { |
1462 | return memblock.memory.regions[0].base; |
1463 | } |
1464 | |
1465 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
1466 | { |
1467 | int idx = memblock.memory.cnt - 1; |
1468 | |
1469 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
1470 | } |
1471 | |
1472 | static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) |
1473 | { |
1474 | phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; |
1475 | struct memblock_region *r; |
1476 | |
1477 | /* |
1478 |  * translate the memory @limit size into the max address within one of |
1479 |  * the memory memblock regions. If @limit exceeds the total size of |
1480 |  * those regions, max_addr keeps its original value of ULLONG_MAX. |
1481 | */ |
1482 | for_each_memblock(memory, r) { |
1483 | if (limit <= r->size) { |
1484 | max_addr = r->base + limit; |
1485 | break; |
1486 | } |
1487 | limit -= r->size; |
1488 | } |
1489 | |
1490 | return max_addr; |
1491 | } |
1492 | |
1493 | void __init memblock_enforce_memory_limit(phys_addr_t limit) |
1494 | { |
1495 | phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; |
1496 | |
1497 | if (!limit) |
1498 | return; |
1499 | |
1500 | max_addr = __find_max_addr(limit); |
1501 | |
1502 | /* @limit exceeds the total size of the memory, do nothing */ |
1503 | if (max_addr == (phys_addr_t)ULLONG_MAX) |
1504 | return; |
1505 | |
1506 | /* truncate both memory and reserved regions */ |
1507 | memblock_remove_range(&memblock.memory, max_addr, |
1508 | (phys_addr_t)ULLONG_MAX); |
1509 | memblock_remove_range(&memblock.reserved, max_addr, |
1510 | (phys_addr_t)ULLONG_MAX); |
1511 | } |
1512 | |
1513 | void __init memblock_mem_limit_remove_map(phys_addr_t limit) |
1514 | { |
1515 | struct memblock_type *type = &memblock.memory; |
1516 | phys_addr_t max_addr; |
1517 | int i, ret, start_rgn, end_rgn; |
1518 | |
1519 | if (!limit) |
1520 | return; |
1521 | |
1522 | max_addr = __find_max_addr(limit); |
1523 | |
1524 | /* @limit exceeds the total size of the memory, do nothing */ |
1525 | if (max_addr == (phys_addr_t)ULLONG_MAX) |
1526 | return; |
1527 | |
1528 | ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX, |
1529 | &start_rgn, &end_rgn); |
1530 | if (ret) |
1531 | return; |
1532 | |
1533 | /* remove all the MAP regions above the limit */ |
1534 | for (i = end_rgn - 1; i >= start_rgn; i--) { |
1535 | if (!memblock_is_nomap(&type->regions[i])) |
1536 | memblock_remove_region(type, i); |
1537 | } |
1538 | /* truncate the reserved regions */ |
1539 | memblock_remove_range(&memblock.reserved, max_addr, |
1540 | (phys_addr_t)ULLONG_MAX); |
1541 | } |
1542 | |
1543 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
1544 | { |
1545 | unsigned int left = 0, right = type->cnt; |
1546 | |
1547 | do { |
1548 | unsigned int mid = (right + left) / 2; |
1549 | |
1550 | if (addr < type->regions[mid].base) |
1551 | right = mid; |
1552 | else if (addr >= (type->regions[mid].base + |
1553 | type->regions[mid].size)) |
1554 | left = mid + 1; |
1555 | else |
1556 | return mid; |
1557 | } while (left < right); |
1558 | return -1; |
1559 | } |
1560 | |
1561 | bool __init memblock_is_reserved(phys_addr_t addr) |
1562 | { |
1563 | return memblock_search(&memblock.reserved, addr) != -1; |
1564 | } |
1565 | |
1566 | bool __init_memblock memblock_is_memory(phys_addr_t addr) |
1567 | { |
1568 | return memblock_search(&memblock.memory, addr) != -1; |
1569 | } |
1570 | |
1571 | int __init_memblock memblock_is_map_memory(phys_addr_t addr) |
1572 | { |
1573 | int i = memblock_search(&memblock.memory, addr); |
1574 | |
1575 | if (i == -1) |
1576 | return false; |
1577 | return !memblock_is_nomap(&memblock.memory.regions[i]); |
1578 | } |
1579 | |
1580 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
1581 | int __init_memblock memblock_search_pfn_nid(unsigned long pfn, |
1582 | unsigned long *start_pfn, unsigned long *end_pfn) |
1583 | { |
1584 | struct memblock_type *type = &memblock.memory; |
1585 | int mid = memblock_search(type, PFN_PHYS(pfn)); |
1586 | |
1587 | if (mid == -1) |
1588 | return -1; |
1589 | |
1590 | *start_pfn = PFN_DOWN(type->regions[mid].base); |
1591 | *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); |
1592 | |
1593 | return type->regions[mid].nid; |
1594 | } |
1595 | #endif |
1596 | |
1597 | /** |
1598 | * memblock_is_region_memory - check if a region is a subset of memory |
1599 | * @base: base of region to check |
1600 | * @size: size of region to check |
1601 | * |
1602 | * Check if the region [@base, @base+@size) is a subset of a memory block. |
1603 | * |
1604 | * RETURNS: |
1605 | * 0 if false, non-zero if true |
1606 | */ |
1607 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
1608 | { |
1609 | int idx = memblock_search(&memblock.memory, base); |
1610 | phys_addr_t end = base + memblock_cap_size(base, &size); |
1611 | |
1612 | if (idx == -1) |
1613 | return 0; |
1614 | return memblock.memory.regions[idx].base <= base && |
1615 | (memblock.memory.regions[idx].base + |
1616 | memblock.memory.regions[idx].size) >= end; |
1617 | } |
1618 | |
1619 | /** |
1620 | * memblock_is_region_reserved - check if a region intersects reserved memory |
1621 | * @base: base of region to check |
1622 | * @size: size of region to check |
1623 | * |
1624 | * Check if the region [@base, @base+@size) intersects a reserved memory block. |
1625 | * |
1626 | * RETURNS: |
1627 | * True if they intersect, false if not. |
1628 | */ |
1629 | bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
1630 | { |
1631 | memblock_cap_size(base, &size); |
1632 | return memblock_overlaps_region(&memblock.reserved, base, size); |
1633 | } |
1634 | |
1635 | void __init_memblock memblock_trim_memory(phys_addr_t align) |
1636 | { |
1637 | phys_addr_t start, end, orig_start, orig_end; |
1638 | struct memblock_region *r; |
1639 | |
1640 | for_each_memblock(memory, r) { |
1641 | orig_start = r->base; |
1642 | orig_end = r->base + r->size; |
1643 | start = round_up(orig_start, align); |
1644 | end = round_down(orig_end, align); |
1645 | |
1646 | if (start == orig_start && end == orig_end) |
1647 | continue; |
1648 | |
1649 | if (start < end) { |
1650 | r->base = start; |
1651 | r->size = end - start; |
1652 | } else { |
1653 | memblock_remove_region(&memblock.memory, |
1654 | r - memblock.memory.regions); |
1655 | r--; |
1656 | } |
1657 | } |
1658 | } |
1659 | |
1660 | void __init_memblock memblock_set_current_limit(phys_addr_t limit) |
1661 | { |
1662 | memblock.current_limit = limit; |
1663 | } |
1664 | |
1665 | phys_addr_t __init_memblock memblock_get_current_limit(void) |
1666 | { |
1667 | return memblock.current_limit; |
1668 | } |
1669 | |
1670 | static void __init_memblock memblock_dump(struct memblock_type *type, char *name) |
1671 | { |
1672 | unsigned long long base, size; |
1673 | unsigned long flags; |
1674 | int idx; |
1675 | struct memblock_region *rgn; |
1676 | |
1677 | pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); |
1678 | |
1679 | for_each_memblock_type(type, rgn) { |
1680 | char nid_buf[32] = ""; |
1681 | |
1682 | base = rgn->base; |
1683 | size = rgn->size; |
1684 | flags = rgn->flags; |
1685 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
1686 | if (memblock_get_region_node(rgn) != MAX_NUMNODES) |
1687 | snprintf(nid_buf, sizeof(nid_buf), " on node %d", |
1688 | memblock_get_region_node(rgn)); |
1689 | #endif |
1690 | pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n", |
1691 | name, idx, base, base + size - 1, size, nid_buf, flags); |
1692 | } |
1693 | } |
1694 | |
1695 | extern unsigned long __init_memblock |
1696 | memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr) |
1697 | { |
1698 | struct memblock_region *rgn; |
1699 | unsigned long size = 0; |
1700 | int idx; |
1701 | |
1702 | for_each_memblock_type((&memblock.reserved), rgn) { |
1703 | phys_addr_t start, end; |
1704 | |
1705 | if (rgn->base + rgn->size < start_addr) |
1706 | continue; |
1707 | if (rgn->base > end_addr) |
1708 | continue; |
1709 | |
1710 | start = rgn->base; |
1711 | end = start + rgn->size; |
1712 | size += end - start; |
1713 | } |
1714 | |
1715 | return size; |
1716 | } |
1717 | |
1718 | void __init_memblock __memblock_dump_all(void) |
1719 | { |
1720 | pr_info("MEMBLOCK configuration:\n"); |
1721 | pr_info(" memory size = %#llx reserved size = %#llx\n", |
1722 | (unsigned long long)memblock.memory.total_size, |
1723 | (unsigned long long)memblock.reserved.total_size); |
1724 | |
1725 | memblock_dump(&memblock.memory, "memory"); |
1726 | memblock_dump(&memblock.reserved, "reserved"); |
1727 | } |
1728 | |
1729 | void __init memblock_allow_resize(void) |
1730 | { |
1731 | memblock_can_resize = 1; |
1732 | } |
1733 | |
1734 | static int __init early_memblock(char *p) |
1735 | { |
1736 | if (p && strstr(p, "debug")) |
1737 | memblock_debug = 1; |
1738 | return 0; |
1739 | } |
1740 | early_param("memblock", early_memblock); |
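
/*
 * Example (illustrative): booting with "memblock=debug" on the kernel
 * command line sets memblock_debug above, which makes every
 * memblock_dbg() in this file print an info-level trace of the
 * add/reserve/free calls, including the caller recorded via %pF.
 */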
1741 | |
1742 | #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) |
1743 | |
1744 | static int memblock_debug_show(struct seq_file *m, void *private) |
1745 | { |
1746 | struct memblock_type *type = m->private; |
1747 | struct memblock_region *reg; |
1748 | int i; |
1749 | |
1750 | for (i = 0; i < type->cnt; i++) { |
1751 | reg = &type->regions[i]; |
1752 | seq_printf(m, "%4d: ", i); |
1753 | if (sizeof(phys_addr_t) == 4) |
1754 | seq_printf(m, "0x%08lx..0x%08lx\n", |
1755 | (unsigned long)reg->base, |
1756 | (unsigned long)(reg->base + reg->size - 1)); |
1757 | else |
1758 | seq_printf(m, "0x%016llx..0x%016llx\n", |
1759 | (unsigned long long)reg->base, |
1760 | (unsigned long long)(reg->base + reg->size - 1)); |
1761 | |
1762 | } |
1763 | return 0; |
1764 | } |
1765 | |
1766 | static int memblock_debug_open(struct inode *inode, struct file *file) |
1767 | { |
1768 | return single_open(file, memblock_debug_show, inode->i_private); |
1769 | } |
1770 | |
1771 | static const struct file_operations memblock_debug_fops = { |
1772 | .open = memblock_debug_open, |
1773 | .read = seq_read, |
1774 | .llseek = seq_lseek, |
1775 | .release = single_release, |
1776 | }; |
1777 | |
1778 | static int __init memblock_init_debugfs(void) |
1779 | { |
1780 | struct dentry *root = debugfs_create_dir("memblock", NULL); |
1781 | if (!root) |
1782 | return -ENXIO; |
1783 | debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); |
1784 | debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); |
1785 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
1786 | debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); |
1787 | #endif |
1788 | |
1789 | return 0; |
1790 | } |
1791 | __initcall(memblock_init_debugfs); |
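
/*
 * Example (illustrative output; the addresses are made up): with debugfs
 * mounted, the files created above can be read directly:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000bfffffff
 *	   1: 0x0000000100000000..0x000000017fffffff
 *
 * matching the "%4d: " and "0x%016llx..0x%016llx" formats used in
 * memblock_debug_show().
 */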
1792 | |
1793 | #endif /* CONFIG_DEBUG_FS */ |
1794 |