/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
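
/*
 * Example (illustrative sketch, not part of this file): the intended
 * usage pattern is to create the pool and add all of its backing
 * memory at init time, in a context that may sleep, and only then
 * allocate and free from lockless contexts. The SRAM addresses and
 * sizes below are made-up values for the example.
 *
 *	pool = gen_pool_create(ilog2(32), -1);	(32-byte granules)
 *	if (!pool)
 *		return -ENOMEM;
 *	rc = gen_pool_add_virt(pool, (unsigned long)sram_vaddr,
 *			       sram_phys, SZ_64K, -1);
 *
 *	(later, possibly from atomic context:)
 *	addr = gen_pool_alloc(pool, 256);
 *	if (addr)
 *		gen_pool_free(pool, addr, 256);
 */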

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to set the same bit, the loser returns the number of bits
 * still to be set; on success the function returns 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

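/*
 * Worked example (added for illustration): on a 64-bit system, setting
 * nr = 70 bits starting at start = 60 touches three words. The first
 * iteration sets bits 60..63 of word 0 via BITMAP_FIRST_WORD_MASK(60),
 * leaving nr = 66; the second sets all 64 bits of word 1, leaving
 * nr = 2; the loop then exits (2 - 64 < 0) and the tail sets bits
 * 0..1 of word 2, using BITMAP_LAST_WORD_MASK(130) to clamp the mask.
 */
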
/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to clear the same bit, the loser returns the number of bits
 * still to be cleared; on success the function returns 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
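
/*
 * Example (illustrative, not part of this file): a driver that has
 * ioremap()'d a block of device SRAM can hand it to the pool with
 * distinct virtual and physical addresses, so gen_pool_virt_to_phys()
 * can later translate allocations for DMA. Names below are made up.
 *
 *	void __iomem *va = devm_ioremap(dev, res->start,
 *					resource_size(res));
 *
 *	rc = gen_pool_add_virt(pool, (unsigned long)va, res->start,
 *			       resource_size(res), dev_to_node(dev));
 *	if (rc)
 *		return rc;
 */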

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an NMI-safe
 * cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an NMI-safe
 * cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);
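
/*
 * Example (illustrative, not part of this file): a one-off allocation
 * with a stricter alignment than the pool default can pass an
 * algorithm and its data explicitly; this does not change pool->algo.
 *
 *	struct genpool_data_align align_data = { .align = 64 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 192, gen_pool_first_fit_align,
 *				   &align_data);
 *	if (!addr)
 *		return -ENOMEM;
 */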

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an NMI-safe
 * cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
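
/*
 * Example (illustrative, not part of this file): allocating a buffer
 * and its bus address for a descriptor ring in one call. The names
 * are made up; note that @dma is really the pool's phys_addr view of
 * the allocation, so it is only a usable DMA address if the chunk was
 * added with a valid physical address.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = gen_pool_dma_alloc(sram_pool, RING_BYTES, &ring_dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(lower_32_bits(ring_dma), base + RING_BASE_REG);
 */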

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
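
/*
 * Example (illustrative, not part of this file): a pool that sees many
 * differently-sized allocations can be switched to best-fit to reduce
 * fragmentation; subsequent gen_pool_alloc() calls use the new algo.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */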

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed region, a struct genpool_data_fixed
 * @pool: pool to get order from
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
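
/*
 * Example (illustrative, not part of this file): reserve the region at
 * a fixed byte offset from the start of the pool, e.g. a firmware
 * mailbox that must live at offset 0x400. Returning @size from the
 * algorithm (as above) signals "no fit" to gen_pool_alloc_algo().
 * The offset must be a multiple of the pool's minimum granule.
 *
 *	struct genpool_data_fixed fixed = { .offset = 0x400 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 0x100, gen_pool_fixed_alloc,
 *				   &fixed);
 *	if (!addr)
 *		return -EBUSY;
 */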

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 *
 * Iterate over the bitmap to find the smallest free region
 * from which the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
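
/*
 * Worked example (added for illustration): with free runs of lengths
 * 8, 3 and 5 in the bitmap and a request for nr = 3 bits, first-fit
 * would carve the request out of the 8-bit run, while best-fit scans
 * the candidates, finds the 3-bit run is an exact fit (len == nr) and
 * returns it immediately, leaving the larger runs intact.
 */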

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
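
/*
 * Example (illustrative, not part of this file): probe-time pool setup
 * with no explicit cleanup; the devres release destroys the pool when
 * the driver unbinds. "sram" names this pool among others on the dev.
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *				    dev_to_node(&pdev->dev), "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */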

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
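
/*
 * Example (illustrative, not part of this file): a client node can
 * point at a pool-providing node via a phandle property; the node
 * names and property name below are made up.
 *
 * In the device tree:
 *
 *	sram: sram@20000000 {
 *		compatible = "mmio-sram";
 *		reg = <0x20000000 0x10000>;
 *	};
 *
 *	codec@1 {
 *		...
 *		iram = <&sram>;
 *	};
 *
 * In the client driver:
 *
 *	pool = of_gen_pool_get(pdev->dev.of_node, "iram", 0);
 *	if (!pool)
 *		return -EPROBE_DEFER;
 */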
#endif /* CONFIG_OF */