/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>	/* kmemleak_ignore_phys() */
#include <trace/events/cma.h>
#ifdef CONFIG_AMLOGIC_CMA
#include <asm/pgtable.h>
#include <linux/amlogic/aml_cma.h>
#include <linux/delay.h>
#include <linux/amlogic/secmon.h>
#endif /* CONFIG_AMLOGIC_CMA */

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

#ifdef CONFIG_AMLOGIC_CMA
/* Record whether this area's kernel linear mapping should be rebuilt */
void cma_init_clear(struct cma *cma, bool clear)
{
	cma->clear_map = clear;
}

#ifdef CONFIG_ARM64
/*
 * Tear down the section-level linear-map entries covering this CMA
 * area so that setup_cma_full_pagemap() can rebuild them with
 * page-level granularity.
 */
static int clear_cma_pagemap2(struct cma *cma)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, end;
	struct mm_struct *mm;

	addr = (unsigned long)pfn_to_kaddr(cma->base_pfn);
	end = addr + cma->count * PAGE_SIZE;
	mm = &init_mm;
	for (; addr < end; addr += SECTION_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			break;

		pr_debug("%s, addr:%lx, pgd:%p %llx, pmd:%p %llx\n",
			 __func__, addr, pgd, pgd_val(*pgd), pmd, pmd_val(*pmd));
		pmd_clear(pmd);
	}

	return 0;
}
#endif

int setup_cma_full_pagemap(struct cma *cma)
{
#ifdef CONFIG_ARM
	/*
	 * ARM already creates level-3 MMU mappings for lowmem CMA, and
	 * highmem CMA has no mapping at all, so there is nothing to do
	 * on this architecture.
	 */
	return 0;
#elif defined(CONFIG_ARM64)
	struct vm_area_struct vma = {};
	unsigned long addr, size;
	int ret;

	clear_cma_pagemap2(cma);
	addr = (unsigned long)pfn_to_kaddr(cma->base_pfn);
	size = cma->count * PAGE_SIZE;
	vma.vm_mm = &init_mm;
	vma.vm_start = addr;
	vma.vm_end = addr + size;
	vma.vm_page_prot = PAGE_KERNEL;
	ret = remap_pfn_range(&vma, addr, cma->base_pfn,
			      size, vma.vm_page_prot);
	if (ret < 0)
		pr_info("%s, remap pte failed:%d, cma:%lx\n",
			__func__, ret, cma->base_pfn);
	return 0;
#else
#error "unsupported architecture"
#endif
}

/*
 * Set up (@set == true) or clear (@set == false) the kernel linear-map
 * PTEs covering @count lowmem pages starting at @page.
 */
int cma_mmu_op(struct page *page, int count, bool set)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, end;
	struct mm_struct *mm;

	if (!page || PageHighMem(page))
		return -EINVAL;

	addr = (unsigned long)page_address(page);
	end = addr + count * PAGE_SIZE;
	mm = &init_mm;
	for (; addr < end; addr += PAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		if (set)
			set_pte_at(mm, addr, pte, mk_pte(page, PAGE_KERNEL));
		else
			pte_clear(mm, addr, pte);
		pte_unmap(pte);
#ifdef CONFIG_ARM
		pr_debug("%s, addr:%lx, pgd:%p %x, pmd:%p %x, pte:%p %x\n",
			 __func__, addr, pgd, (int)pgd_val(*pgd),
			 pmd, (int)pmd_val(*pmd), pte, (int)pte_val(*pte));
#elif defined(CONFIG_ARM64)
		pr_debug("%s, addr:%lx, pgd:%p %llx, pmd:%p %llx, pte:%p %llx\n",
			 __func__, addr, pgd, pgd_val(*pgd),
			 pmd, pmd_val(*pmd), pte, pte_val(*pte));
#endif
		page++;
	}
	return 0;
}
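
/*
 * Hypothetical usage sketch (illustrative, not taken from this file):
 * a caller handing CMA pages to secure firmware could drop their
 * kernel linear mapping while they are inaccessible and restore it
 * afterwards:
 *
 *	cma_mmu_op(page, npages, false);	(unmap from linear map)
 *	... pages owned by the secure world ...
 *	cma_mmu_op(page, npages, true);		(remap as PAGE_KERNEL)
 */
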
#endif

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
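
/*
 * Worked example (illustrative): with order_per_bit == 2 each bitmap
 * bit stands for 4 pages, so a request for 10 pages needs
 * ALIGN(10, 4) >> 2 == 3 bits, and align_order == 4 gives an
 * allocation mask of (1UL << (4 - 2)) - 1 == 3 in
 * cma_bitmap_aligned_mask().
 */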

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_AMLOGIC_CMA
	if (cma->clear_map)
		setup_cma_full_pagemap(cma);
#endif

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}
#ifdef CONFIG_AMLOGIC_SEC
	/*
	 * Speculative cache prefetching on the Cortex-A73 can raise an
	 * SError during boot if it touches cache lines in the secure
	 * memory range already reserved by the bootloader, so the MMU
	 * entries covering the secmon range must be cleared before the
	 * A73 cores are brought up.
	 */
	secmon_clear_cma_mmu();
#endif
	return 0;
}

#ifdef CONFIG_AMLOGIC_CMA
early_initcall(cma_init_reserved_areas);
#else
core_initcall(cma_init_reserved_areas);
#endif

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
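
/*
 * Minimal usage sketch (hypothetical, not from this file): early arch
 * code that has already memblock_reserve()d a region could register it
 * as a CMA area; the `my_cma` name is illustrative only:
 *
 *	static struct cma *my_cma;
 *
 *	memblock_reserve(base, size);
 *	ret = cma_init_reserved_mem(base, size, 0, &my_cma);
 *	if (ret)
 *		pr_err("cma: init failed: %d\n", ret);
 *
 * Both @base and @size must be aligned to
 * PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order), or -EINVAL is
 * returned.
 */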

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate. But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it. On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		 __func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; in that case the area would no longer yield contiguous
	 * memory, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
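
/*
 * Minimal usage sketch (hypothetical): reserving 16 MiB anywhere in
 * DRAM from early arch setup, letting the allocator pick the base;
 * the `dma_area` name is illustrative only:
 *
 *	static struct cma *dma_area;
 *
 *	ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &dma_area);
 *
 * With @base, @limit and @alignment all zero the area is aligned up to
 * PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order) and typically lands
 * toward the top of DRAM, since memblock allocates top-down by default.
 */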

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;
#ifdef CONFIG_AMLOGIC_CMA
	int dummy;
	unsigned long long tick;
	unsigned long long in_tick, timeout;

	in_tick = sched_clock();
#endif /* CONFIG_AMLOGIC_CMA */

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

#ifdef CONFIG_AMLOGIC_CMA
	tick = sched_clock();
	cma_debug(0, NULL, "(cma %p, count %zu, align %d)\n",
		  (void *)cma, count, align);
	in_tick = sched_clock();
	/* retry budget in sched_clock() ns: 2ms plus ~2ms per MiB requested */
	timeout = 2ULL * 1000000 * (1 + ((count * PAGE_SIZE) >> 20));
#endif
	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

#ifdef CONFIG_AMLOGIC_CMA
	aml_cma_alloc_pre_hook(&dummy, count);
#endif /* CONFIG_AMLOGIC_CMA */

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
#ifdef CONFIG_AMLOGIC_CMA
		ret = aml_cma_alloc_range(pfn, pfn + count);
#else
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
#endif /* CONFIG_AMLOGIC_CMA */
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
#ifndef CONFIG_AMLOGIC_CMA
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
#else
		/*
		 * The CMA allocation may be blocked on some pages; once the
		 * retry budget has elapsed, relax the CPU and try again
		 * later.
		 */
		if ((sched_clock() - in_tick) >= timeout)
			usleep_range(1000, 2000);
#endif /* CONFIG_AMLOGIC_CMA */
	}

	trace_cma_alloc(pfn, page, count, align);

#ifdef CONFIG_AMLOGIC_CMA
	aml_cma_alloc_post_hook(&dummy, count, page);
	cma_debug(0, NULL, "return page:%lx, tick:%lld\n",
		  page ? page_to_pfn(page) : 0, sched_clock() - tick);
#endif /* CONFIG_AMLOGIC_CMA */
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
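
/*
 * Minimal usage sketch (hypothetical): a driver allocating a 1 MiB
 * buffer from its private area; `my_cma` stands for a region obtained
 * earlier from cma_declare_contiguous():
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, SZ_1M / PAGE_SIZE, 0);
 *	if (!page)
 *		return -ENOMEM;
 *
 * The pages must eventually be returned with a matching
 * cma_release(my_cma, page, SZ_1M / PAGE_SIZE).
 */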

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

#ifdef CONFIG_AMLOGIC_CMA
	aml_cma_release_hook(count, (struct page *)pages);
	aml_cma_free(pfn, count);
#else
	free_contig_range(pfn, count);
#endif /* CONFIG_AMLOGIC_CMA */
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
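
/*
 * Matching release for the cma_alloc() sketch above (names
 * illustrative): @pages and @count must be exactly those returned by
 * and passed to cma_alloc(), since the bitmap is cleared in
 * order_per_bit granules:
 *
 *	if (!cma_release(my_cma, page, SZ_1M / PAGE_SIZE))
 *		pr_warn("cma: pages not from this area\n");
 */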
650 |