1 | /* |
2 | * linux/kernel/resource.c |
3 | * |
4 | * Copyright (C) 1999 Linus Torvalds |
5 | * Copyright (C) 1999 Martin Mares <mj@ucw.cz> |
6 | * |
7 | * Arbitrary resource management. |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/export.h> |
13 | #include <linux/errno.h> |
14 | #include <linux/ioport.h> |
15 | #include <linux/init.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/spinlock.h> |
18 | #include <linux/fs.h> |
19 | #include <linux/proc_fs.h> |
20 | #include <linux/sched.h> |
21 | #include <linux/seq_file.h> |
22 | #include <linux/device.h> |
23 | #include <linux/pfn.h> |
24 | #include <linux/mm.h> |
25 | #include <linux/resource_ext.h> |
26 | #include <asm/io.h> |
27 | |
28 | |
29 | struct resource ioport_resource = { |
30 | .name = "PCI IO", |
31 | .start = 0, |
32 | .end = IO_SPACE_LIMIT, |
33 | .flags = IORESOURCE_IO, |
34 | }; |
35 | EXPORT_SYMBOL(ioport_resource); |
36 | |
37 | struct resource iomem_resource = { |
38 | .name = "PCI mem", |
39 | .start = 0, |
40 | .end = -1, |
41 | .flags = IORESOURCE_MEM, |
42 | }; |
43 | EXPORT_SYMBOL(iomem_resource); |
44 | |
45 | /* constraints to be met while allocating resources */ |
46 | struct resource_constraint { |
47 | resource_size_t min, max, align; |
48 | resource_size_t (*alignf)(void *, const struct resource *, |
49 | resource_size_t, resource_size_t); |
50 | void *alignf_data; |
51 | }; |
52 | |
53 | static DEFINE_RWLOCK(resource_lock); |
54 | |
55 | /* |
56 | * For memory hotplug, there is no way to free resource entries allocated |
57 | * by boot mem after the system is up. So, to allow such an entry to be |
58 | * reused, we keep it on a free list instead of freeing it. |
59 | */ |
60 | static struct resource *bootmem_resource_free; |
61 | static DEFINE_SPINLOCK(bootmem_resource_lock); |
62 | |
63 | static struct resource *next_resource(struct resource *p, bool sibling_only) |
64 | { |
65 | /* Caller wants to traverse through siblings only */ |
66 | if (sibling_only) |
67 | return p->sibling; |
68 | |
69 | if (p->child) |
70 | return p->child; |
71 | while (!p->sibling && p->parent) |
72 | p = p->parent; |
73 | return p->sibling; |
74 | } |
75 | |
76 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) |
77 | { |
78 | struct resource *p = v; |
79 | (*pos)++; |
80 | return (void *)next_resource(p, false); |
81 | } |
82 | |
83 | #ifdef CONFIG_PROC_FS |
84 | |
85 | enum { MAX_IORES_LEVEL = 5 }; |
86 | |
87 | static void *r_start(struct seq_file *m, loff_t *pos) |
88 | __acquires(resource_lock) |
89 | { |
90 | struct resource *p = m->private; |
91 | loff_t l = 0; |
92 | read_lock(&resource_lock); |
93 | for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) |
94 | ; |
95 | return p; |
96 | } |
97 | |
98 | static void r_stop(struct seq_file *m, void *v) |
99 | __releases(resource_lock) |
100 | { |
101 | read_unlock(&resource_lock); |
102 | } |
103 | |
104 | static int r_show(struct seq_file *m, void *v) |
105 | { |
106 | struct resource *root = m->private; |
107 | struct resource *r = v, *p; |
108 | unsigned long long start, end; |
109 | int width = root->end < 0x10000 ? 4 : 8; |
110 | int depth; |
111 | |
112 | for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) |
113 | if (p->parent == root) |
114 | break; |
115 | |
116 | if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) { |
117 | start = r->start; |
118 | end = r->end; |
119 | } else { |
120 | start = end = 0; |
121 | } |
122 | |
123 | seq_printf(m, "%*s%0*llx-%0*llx : %s\n", |
124 | depth * 2, "", |
125 | width, start, |
126 | width, end, |
127 | r->name ? r->name : "<BAD>"); |
128 | return 0; |
129 | } |
130 | |
131 | static const struct seq_operations resource_op = { |
132 | .start = r_start, |
133 | .next = r_next, |
134 | .stop = r_stop, |
135 | .show = r_show, |
136 | }; |
137 | |
138 | static int ioports_open(struct inode *inode, struct file *file) |
139 | { |
140 | int res = seq_open(file, &resource_op); |
141 | if (!res) { |
142 | struct seq_file *m = file->private_data; |
143 | m->private = &ioport_resource; |
144 | } |
145 | return res; |
146 | } |
147 | |
148 | static int iomem_open(struct inode *inode, struct file *file) |
149 | { |
150 | int res = seq_open(file, &resource_op); |
151 | if (!res) { |
152 | struct seq_file *m = file->private_data; |
153 | m->private = &iomem_resource; |
154 | } |
155 | return res; |
156 | } |
157 | |
158 | static const struct file_operations proc_ioports_operations = { |
159 | .open = ioports_open, |
160 | .read = seq_read, |
161 | .llseek = seq_lseek, |
162 | .release = seq_release, |
163 | }; |
164 | |
165 | static const struct file_operations proc_iomem_operations = { |
166 | .open = iomem_open, |
167 | .read = seq_read, |
168 | .llseek = seq_lseek, |
169 | .release = seq_release, |
170 | }; |
171 | |
172 | static int __init ioresources_init(void) |
173 | { |
174 | proc_create("ioports", 0, NULL, &proc_ioports_operations); |
175 | proc_create("iomem", 0, NULL, &proc_iomem_operations); |
176 | return 0; |
177 | } |
178 | __initcall(ioresources_init); |
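/*
 * Illustrative sketch (not from this file): with the two proc entries
 * created above, r_show() renders each node of the tree as one line,
 * indented two spaces per nesting level below the root. /proc/ioports
 * might therefore contain entries such as (names and ranges hypothetical):
 *
 *        0000-0cf7 : PCI Bus 0000:00
 *          0060-0060 : keyboard
 *          0070-0071 : rtc0
 *
 * The 4- vs. 8-digit field width is chosen in r_show() from root->end,
 * and the start/end values are shown as zero to readers that lack
 * CAP_SYS_ADMIN.
 */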
179 | |
180 | #endif /* CONFIG_PROC_FS */ |
181 | |
182 | static void free_resource(struct resource *res) |
183 | { |
184 | if (!res) |
185 | return; |
186 | |
187 | if (!PageSlab(virt_to_head_page(res))) { |
188 | spin_lock(&bootmem_resource_lock); |
189 | res->sibling = bootmem_resource_free; |
190 | bootmem_resource_free = res; |
191 | spin_unlock(&bootmem_resource_lock); |
192 | } else { |
193 | kfree(res); |
194 | } |
195 | } |
196 | |
197 | static struct resource *alloc_resource(gfp_t flags) |
198 | { |
199 | struct resource *res = NULL; |
200 | |
201 | spin_lock(&bootmem_resource_lock); |
202 | if (bootmem_resource_free) { |
203 | res = bootmem_resource_free; |
204 | bootmem_resource_free = res->sibling; |
205 | } |
206 | spin_unlock(&bootmem_resource_lock); |
207 | |
208 | if (res) |
209 | memset(res, 0, sizeof(struct resource)); |
210 | else |
211 | res = kzalloc(sizeof(struct resource), flags); |
212 | |
213 | return res; |
214 | } |
215 | |
216 | /* Return the conflict entry if you can't request it */ |
217 | static struct resource * __request_resource(struct resource *root, struct resource *new) |
218 | { |
219 | resource_size_t start = new->start; |
220 | resource_size_t end = new->end; |
221 | struct resource *tmp, **p; |
222 | |
223 | if (end < start) |
224 | return root; |
225 | if (start < root->start) |
226 | return root; |
227 | if (end > root->end) |
228 | return root; |
229 | p = &root->child; |
230 | for (;;) { |
231 | tmp = *p; |
232 | if (!tmp || tmp->start > end) { |
233 | new->sibling = tmp; |
234 | *p = new; |
235 | new->parent = root; |
236 | return NULL; |
237 | } |
238 | p = &tmp->sibling; |
239 | if (tmp->end < start) |
240 | continue; |
241 | return tmp; |
242 | } |
243 | } |
244 | |
245 | static int __release_resource(struct resource *old, bool release_child) |
246 | { |
247 | struct resource *tmp, **p, *chd; |
248 | |
249 | p = &old->parent->child; |
250 | for (;;) { |
251 | tmp = *p; |
252 | if (!tmp) |
253 | break; |
254 | if (tmp == old) { |
255 | if (release_child || !(tmp->child)) { |
256 | *p = tmp->sibling; |
257 | } else { |
258 | for (chd = tmp->child;; chd = chd->sibling) { |
259 | chd->parent = tmp->parent; |
260 | if (!(chd->sibling)) |
261 | break; |
262 | } |
263 | *p = tmp->child; |
264 | chd->sibling = tmp->sibling; |
265 | } |
266 | old->parent = NULL; |
267 | return 0; |
268 | } |
269 | p = &tmp->sibling; |
270 | } |
271 | return -EINVAL; |
272 | } |
273 | |
274 | static void __release_child_resources(struct resource *r) |
275 | { |
276 | struct resource *tmp, *p; |
277 | resource_size_t size; |
278 | |
279 | p = r->child; |
280 | r->child = NULL; |
281 | while (p) { |
282 | tmp = p; |
283 | p = p->sibling; |
284 | |
285 | tmp->parent = NULL; |
286 | tmp->sibling = NULL; |
287 | __release_child_resources(tmp); |
288 | |
289 | printk(KERN_DEBUG "release child resource %pR\n", tmp); |
290 | /* need to restore size, and keep flags */ |
291 | size = resource_size(tmp); |
292 | tmp->start = 0; |
293 | tmp->end = size - 1; |
294 | } |
295 | } |
296 | |
297 | void release_child_resources(struct resource *r) |
298 | { |
299 | write_lock(&resource_lock); |
300 | __release_child_resources(r); |
301 | write_unlock(&resource_lock); |
302 | } |
303 | |
304 | /** |
305 | * request_resource_conflict - request and reserve an I/O or memory resource |
306 | * @root: root resource descriptor |
307 | * @new: resource descriptor desired by caller |
308 | * |
309 | * Returns NULL for success, conflict resource on error. |
310 | */ |
311 | struct resource *request_resource_conflict(struct resource *root, struct resource *new) |
312 | { |
313 | struct resource *conflict; |
314 | |
315 | write_lock(&resource_lock); |
316 | conflict = __request_resource(root, new); |
317 | write_unlock(&resource_lock); |
318 | return conflict; |
319 | } |
320 | |
321 | /** |
322 | * request_resource - request and reserve an I/O or memory resource |
323 | * @root: root resource descriptor |
324 | * @new: resource descriptor desired by caller |
325 | * |
326 | * Returns 0 for success, negative error code on error. |
327 | */ |
328 | int request_resource(struct resource *root, struct resource *new) |
329 | { |
330 | struct resource *conflict; |
331 | |
332 | conflict = request_resource_conflict(root, new); |
333 | return conflict ? -EBUSY : 0; |
334 | } |
335 | |
336 | EXPORT_SYMBOL(request_resource); |
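/*
 * Illustrative usage sketch (not part of this file): a driver that owns a
 * fixed legacy I/O window could reserve it against ioport_resource like
 * this; the name and port range below are hypothetical. The matching
 * teardown is release_resource().
 *
 *        static struct resource foo_ports = {
 *                .name  = "foo",
 *                .start = 0x0300,
 *                .end   = 0x0307,
 *                .flags = IORESOURCE_IO,
 *        };
 *
 *        if (request_resource(&ioport_resource, &foo_ports))
 *                return -EBUSY;
 */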
337 | |
338 | /** |
339 | * release_resource - release a previously reserved resource |
340 | * @old: resource pointer |
341 | */ |
342 | int release_resource(struct resource *old) |
343 | { |
344 | int retval; |
345 | |
346 | write_lock(&resource_lock); |
347 | retval = __release_resource(old, true); |
348 | write_unlock(&resource_lock); |
349 | return retval; |
350 | } |
351 | |
352 | EXPORT_SYMBOL(release_resource); |
353 | |
354 | /* |
355 | * Finds the lowest iomem resource existing within [res->start, res->end). |
356 | * The caller must specify res->start, res->end, res->flags, and optionally |
357 | * desc. If found, returns 0 and res is overwritten; if not found, returns -1. |
358 | * This function walks the whole tree, not just the first-level children, |
359 | * unless first_level_children_only is true. |
360 | */ |
361 | static int find_next_iomem_res(struct resource *res, unsigned long desc, |
362 | bool first_level_children_only) |
363 | { |
364 | resource_size_t start, end; |
365 | struct resource *p; |
366 | bool sibling_only = false; |
367 | |
368 | BUG_ON(!res); |
369 | |
370 | start = res->start; |
371 | end = res->end; |
372 | BUG_ON(start >= end); |
373 | |
374 | if (first_level_children_only) |
375 | sibling_only = true; |
376 | |
377 | read_lock(&resource_lock); |
378 | |
379 | for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) { |
380 | if ((p->flags & res->flags) != res->flags) |
381 | continue; |
382 | if ((desc != IORES_DESC_NONE) && (desc != p->desc)) |
383 | continue; |
384 | if (p->start > end) { |
385 | p = NULL; |
386 | break; |
387 | } |
388 | if ((p->end >= start) && (p->start < end)) |
389 | break; |
390 | } |
391 | |
392 | read_unlock(&resource_lock); |
393 | if (!p) |
394 | return -1; |
395 | /* copy data */ |
396 | if (res->start < p->start) |
397 | res->start = p->start; |
398 | if (res->end > p->end) |
399 | res->end = p->end; |
400 | return 0; |
401 | } |
402 | |
403 | /* |
404 | * Walks through iomem resources and calls func() with matching resource |
405 | * ranges. This walks through the whole tree, not just the first-level |
406 | * children. All memory ranges that overlap [start, end] and also match |
407 | * flags and desc are valid candidates. |
408 | * |
409 | * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check. |
410 | * @flags: I/O resource flags |
411 | * @start: start addr |
412 | * @end: end addr |
413 | * |
414 | * NOTE: For a new descriptor search, define a new IORES_DESC in |
415 | * <linux/ioport.h> and set it in 'desc' of a target resource entry. |
416 | */ |
417 | int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, |
418 | u64 end, void *arg, int (*func)(u64, u64, void *)) |
419 | { |
420 | struct resource res; |
421 | u64 orig_end; |
422 | int ret = -1; |
423 | |
424 | res.start = start; |
425 | res.end = end; |
426 | res.flags = flags; |
427 | orig_end = res.end; |
428 | |
429 | while ((res.start < res.end) && |
430 | (!find_next_iomem_res(&res, desc, false))) { |
431 | |
432 | ret = (*func)(res.start, res.end, arg); |
433 | if (ret) |
434 | break; |
435 | |
436 | res.start = res.end + 1; |
437 | res.end = orig_end; |
438 | } |
439 | |
440 | return ret; |
441 | } |
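/*
 * Illustrative callback sketch (not part of this file): summing the bytes
 * of every busy memory range, with no descriptor filtering. The helper
 * name and the u64 accumulator are assumptions made for the example.
 *
 *        static int count_bytes(u64 start, u64 end, void *arg)
 *        {
 *                *(u64 *)arg += end - start + 1;
 *                return 0;
 *        }
 *
 *        u64 total = 0;
 *        walk_iomem_res_desc(IORES_DESC_NONE,
 *                            IORESOURCE_MEM | IORESOURCE_BUSY,
 *                            0, -1, &total, count_bytes);
 */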
442 | |
443 | /* |
444 | * This function calls the @func callback against all memory ranges of type |
445 | * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY. |
446 | * This function is only for System RAM; it deals with full ranges and |
447 | * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate |
448 | * ranges. |
449 | */ |
450 | int walk_system_ram_res(u64 start, u64 end, void *arg, |
451 | int (*func)(u64, u64, void *)) |
452 | { |
453 | struct resource res; |
454 | u64 orig_end; |
455 | int ret = -1; |
456 | |
457 | res.start = start; |
458 | res.end = end; |
459 | res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; |
460 | orig_end = res.end; |
461 | while ((res.start < res.end) && |
462 | (!find_next_iomem_res(&res, IORES_DESC_NONE, true))) { |
463 | ret = (*func)(res.start, res.end, arg); |
464 | if (ret) |
465 | break; |
466 | res.start = res.end + 1; |
467 | res.end = orig_end; |
468 | } |
469 | return ret; |
470 | } |
471 | |
472 | #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) |
473 | |
474 | /* |
475 | * This function calls the @func callback against all memory ranges of type |
476 | * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY. |
477 | * It is to be used only for System RAM. |
478 | */ |
479 | int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, |
480 | void *arg, int (*func)(unsigned long, unsigned long, void *)) |
481 | { |
482 | struct resource res; |
483 | unsigned long pfn, end_pfn; |
484 | u64 orig_end; |
485 | int ret = -1; |
486 | |
487 | res.start = (u64) start_pfn << PAGE_SHIFT; |
488 | res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; |
489 | res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; |
490 | orig_end = res.end; |
491 | while ((res.start < res.end) && |
492 | (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) { |
493 | pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; |
494 | end_pfn = (res.end + 1) >> PAGE_SHIFT; |
495 | if (end_pfn > pfn) |
496 | ret = (*func)(pfn, end_pfn - pfn, arg); |
497 | if (ret) |
498 | break; |
499 | res.start = res.end + 1; |
500 | res.end = orig_end; |
501 | } |
502 | return ret; |
503 | } |
504 | |
505 | #endif |
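/*
 * Illustrative sketch (not part of this file): counting System RAM pages
 * in a PFN window via the walker above. Names are hypothetical.
 *
 *        static int add_pages(unsigned long pfn, unsigned long nr_pages,
 *                             void *arg)
 *        {
 *                *(unsigned long *)arg += nr_pages;
 *                return 0;
 *        }
 *
 *        unsigned long ram_pages = 0;
 *        walk_system_ram_range(start_pfn, nr_pages, &ram_pages, add_pages);
 */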
506 | |
507 | static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) |
508 | { |
509 | return 1; |
510 | } |
511 | /* |
512 | * This generic page_is_ram() returns true if the specified address is |
513 | * registered as System RAM in the iomem_resource list. |
514 | */ |
515 | int __weak page_is_ram(unsigned long pfn) |
516 | { |
517 | return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; |
518 | } |
519 | EXPORT_SYMBOL_GPL(page_is_ram); |
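/*
 * Illustrative sketch (not part of this file): page_is_ram() gives a quick
 * System RAM check for a single page frame, e.g.
 *
 *        if (page_is_ram(PFN_DOWN(phys)))
 *                return -EPERM;
 *
 * PFN_DOWN() comes from <linux/pfn.h>, already included above; the policy
 * shown (refusing RAM) is just an example.
 */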
520 | |
521 | /** |
522 | * region_intersects() - determine intersection of region with known resources |
523 | * @start: region start address |
524 | * @size: size of region |
525 | * @flags: flags of resource (in iomem_resource) |
526 | * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE |
527 | * |
528 | * Check if the specified region partially overlaps or fully eclipses a |
529 | * resource identified by @flags and @desc (optional with IORES_DESC_NONE). |
530 | * Return REGION_DISJOINT if the region does not overlap @flags/@desc, |
531 | * return REGION_MIXED if the region overlaps @flags/@desc and another |
532 | * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc |
533 | * and no other defined resource. Note that REGION_INTERSECTS is also |
534 | * returned in the case when the specified region overlaps RAM and undefined |
535 | * memory holes. |
536 | * |
537 | * region_intersects() is used by memory remapping functions to ensure |
538 | * the user is not remapping RAM; it is a vast speed-up over walking |
539 | * through the resource table page by page. |
540 | */ |
541 | int region_intersects(resource_size_t start, size_t size, unsigned long flags, |
542 | unsigned long desc) |
543 | { |
544 | resource_size_t end = start + size - 1; |
545 | int type = 0; int other = 0; |
546 | struct resource *p; |
547 | |
548 | read_lock(&resource_lock); |
549 | for (p = iomem_resource.child; p ; p = p->sibling) { |
550 | bool is_type = (((p->flags & flags) == flags) && |
551 | ((desc == IORES_DESC_NONE) || |
552 | (desc == p->desc))); |
553 | |
554 | if (start >= p->start && start <= p->end) |
555 | is_type ? type++ : other++; |
556 | if (end >= p->start && end <= p->end) |
557 | is_type ? type++ : other++; |
558 | if (p->start >= start && p->end <= end) |
559 | is_type ? type++ : other++; |
560 | } |
561 | read_unlock(&resource_lock); |
562 | |
563 | if (other == 0) |
564 | return type ? REGION_INTERSECTS : REGION_DISJOINT; |
565 | |
566 | if (type) |
567 | return REGION_MIXED; |
568 | |
569 | return REGION_DISJOINT; |
570 | } |
571 | EXPORT_SYMBOL_GPL(region_intersects); |
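/*
 * Illustrative sketch (not part of this file): a memremap()-style caller
 * refusing to map anything that touches System RAM. Variable names are
 * hypothetical.
 *
 *        int ret = region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *                                    IORES_DESC_NONE);
 *        if (ret != REGION_DISJOINT)
 *                return -EINVAL;
 */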
572 | |
573 | void __weak arch_remove_reservations(struct resource *avail) |
574 | { |
575 | } |
576 | |
577 | static resource_size_t simple_align_resource(void *data, |
578 | const struct resource *avail, |
579 | resource_size_t size, |
580 | resource_size_t align) |
581 | { |
582 | return avail->start; |
583 | } |
584 | |
585 | static void resource_clip(struct resource *res, resource_size_t min, |
586 | resource_size_t max) |
587 | { |
588 | if (res->start < min) |
589 | res->start = min; |
590 | if (res->end > max) |
591 | res->end = max; |
592 | } |
593 | |
594 | /* |
595 | * Find empty slot in the resource tree with the given range and |
596 | * alignment constraints |
597 | */ |
598 | static int __find_resource(struct resource *root, struct resource *old, |
599 | struct resource *new, |
600 | resource_size_t size, |
601 | struct resource_constraint *constraint) |
602 | { |
603 | struct resource *this = root->child; |
604 | struct resource tmp = *new, avail, alloc; |
605 | |
606 | tmp.start = root->start; |
607 | /* |
608 | * Skip past an allocated resource that starts at 0, since the assignment |
609 | * of this->start - 1 to tmp->end below would cause an underflow. |
610 | */ |
611 | if (this && this->start == root->start) { |
612 | tmp.start = (this == old) ? old->start : this->end + 1; |
613 | this = this->sibling; |
614 | } |
615 | for(;;) { |
616 | if (this) |
617 | tmp.end = (this == old) ? this->end : this->start - 1; |
618 | else |
619 | tmp.end = root->end; |
620 | |
621 | if (tmp.end < tmp.start) |
622 | goto next; |
623 | |
624 | resource_clip(&tmp, constraint->min, constraint->max); |
625 | arch_remove_reservations(&tmp); |
626 | |
627 | /* Check for overflow after ALIGN() */ |
628 | avail.start = ALIGN(tmp.start, constraint->align); |
629 | avail.end = tmp.end; |
630 | avail.flags = new->flags & ~IORESOURCE_UNSET; |
631 | if (avail.start >= tmp.start) { |
632 | alloc.flags = avail.flags; |
633 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, |
634 | size, constraint->align); |
635 | alloc.end = alloc.start + size - 1; |
636 | if (alloc.start <= alloc.end && |
637 | resource_contains(&avail, &alloc)) { |
638 | new->start = alloc.start; |
639 | new->end = alloc.end; |
640 | return 0; |
641 | } |
642 | } |
643 | |
644 | next: if (!this || this->end == root->end) |
645 | break; |
646 | |
647 | if (this != old) |
648 | tmp.start = this->end + 1; |
649 | this = this->sibling; |
650 | } |
651 | return -EBUSY; |
652 | } |
653 | |
654 | /* |
655 | * Find empty slot in the resource tree given range and alignment. |
656 | */ |
657 | static int find_resource(struct resource *root, struct resource *new, |
658 | resource_size_t size, |
659 | struct resource_constraint *constraint) |
660 | { |
661 | return __find_resource(root, NULL, new, size, constraint); |
662 | } |
663 | |
664 | /** |
665 | * reallocate_resource - allocate a slot in the resource tree given range & alignment. |
666 | * The resource will be relocated if the new size cannot be accommodated in |
667 | * the current location. |
668 | * |
669 | * @root: root resource descriptor |
670 | * @old: resource descriptor desired by caller |
671 | * @newsize: new size of the resource descriptor |
672 | * @constraint: the size and alignment constraints to be met. |
673 | */ |
674 | static int reallocate_resource(struct resource *root, struct resource *old, |
675 | resource_size_t newsize, |
676 | struct resource_constraint *constraint) |
677 | { |
678 | int err=0; |
679 | struct resource new = *old; |
680 | struct resource *conflict; |
681 | |
682 | write_lock(&resource_lock); |
683 | |
684 | if ((err = __find_resource(root, old, &new, newsize, constraint))) |
685 | goto out; |
686 | |
687 | if (resource_contains(&new, old)) { |
688 | old->start = new.start; |
689 | old->end = new.end; |
690 | goto out; |
691 | } |
692 | |
693 | if (old->child) { |
694 | err = -EBUSY; |
695 | goto out; |
696 | } |
697 | |
698 | if (resource_contains(old, &new)) { |
699 | old->start = new.start; |
700 | old->end = new.end; |
701 | } else { |
702 | __release_resource(old, true); |
703 | *old = new; |
704 | conflict = __request_resource(root, old); |
705 | BUG_ON(conflict); |
706 | } |
707 | out: |
708 | write_unlock(&resource_lock); |
709 | return err; |
710 | } |
711 | |
712 | |
713 | /** |
714 | * allocate_resource - allocate empty slot in the resource tree given range & alignment. |
715 | * The resource will be reallocated with a new size if it was already allocated. |
716 | * @root: root resource descriptor |
717 | * @new: resource descriptor desired by caller |
718 | * @size: requested resource region size |
719 | * @min: minimum boundary to allocate |
720 | * @max: maximum boundary to allocate |
721 | * @align: alignment requested, in bytes |
722 | * @alignf: alignment function, optional, called if not NULL |
723 | * @alignf_data: arbitrary data to pass to the @alignf function |
724 | */ |
725 | int allocate_resource(struct resource *root, struct resource *new, |
726 | resource_size_t size, resource_size_t min, |
727 | resource_size_t max, resource_size_t align, |
728 | resource_size_t (*alignf)(void *, |
729 | const struct resource *, |
730 | resource_size_t, |
731 | resource_size_t), |
732 | void *alignf_data) |
733 | { |
734 | int err; |
735 | struct resource_constraint constraint; |
736 | |
737 | if (!alignf) |
738 | alignf = simple_align_resource; |
739 | |
740 | constraint.min = min; |
741 | constraint.max = max; |
742 | constraint.align = align; |
743 | constraint.alignf = alignf; |
744 | constraint.alignf_data = alignf_data; |
745 | |
746 | if ( new->parent ) { |
747 | /* resource is already allocated, try reallocating with |
748 | the new constraints */ |
749 | return reallocate_resource(root, new, size, &constraint); |
750 | } |
751 | |
752 | write_lock(&resource_lock); |
753 | err = find_resource(root, new, size, &constraint); |
754 | if (err >= 0 && __request_resource(root, new)) |
755 | err = -EBUSY; |
756 | write_unlock(&resource_lock); |
757 | return err; |
758 | } |
759 | |
760 | EXPORT_SYMBOL(allocate_resource); |
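/*
 * Illustrative sketch (not part of this file): asking for any free 4 KiB,
 * 4 KiB-aligned window below 4 GiB in the MMIO space. The resource name
 * and the chosen bounds are hypothetical.
 *
 *        static struct resource foo_win = {
 *                .name  = "foo window",
 *                .flags = IORESOURCE_MEM,
 *        };
 *
 *        err = allocate_resource(&iomem_resource, &foo_win, 0x1000,
 *                                0, 0xffffffff, 0x1000, NULL, NULL);
 *
 * Passing a NULL @alignf falls back to simple_align_resource(), which
 * simply takes the lowest suitable address.
 */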
761 | |
762 | /** |
763 | * lookup_resource - find an existing resource by a resource start address |
764 | * @root: root resource descriptor |
765 | * @start: resource start address |
766 | * |
767 | * Returns a pointer to the resource if found, NULL otherwise |
768 | */ |
769 | struct resource *lookup_resource(struct resource *root, resource_size_t start) |
770 | { |
771 | struct resource *res; |
772 | |
773 | read_lock(&resource_lock); |
774 | for (res = root->child; res; res = res->sibling) { |
775 | if (res->start == start) |
776 | break; |
777 | } |
778 | read_unlock(&resource_lock); |
779 | |
780 | return res; |
781 | } |
782 | |
783 | /* |
784 | * Insert a resource into the resource tree. If successful, return NULL, |
785 | * otherwise return the conflicting resource (compare to __request_resource()) |
786 | */ |
787 | static struct resource * __insert_resource(struct resource *parent, struct resource *new) |
788 | { |
789 | struct resource *first, *next; |
790 | |
791 | for (;; parent = first) { |
792 | first = __request_resource(parent, new); |
793 | if (!first) |
794 | return first; |
795 | |
796 | if (first == parent) |
797 | return first; |
798 | if (WARN_ON(first == new)) /* duplicated insertion */ |
799 | return first; |
800 | |
801 | if ((first->start > new->start) || (first->end < new->end)) |
802 | break; |
803 | if ((first->start == new->start) && (first->end == new->end)) |
804 | break; |
805 | } |
806 | |
807 | for (next = first; ; next = next->sibling) { |
808 | /* Partial overlap? Bad, and unfixable */ |
809 | if (next->start < new->start || next->end > new->end) |
810 | return next; |
811 | if (!next->sibling) |
812 | break; |
813 | if (next->sibling->start > new->end) |
814 | break; |
815 | } |
816 | |
817 | new->parent = parent; |
818 | new->sibling = next->sibling; |
819 | new->child = first; |
820 | |
821 | next->sibling = NULL; |
822 | for (next = first; next; next = next->sibling) |
823 | next->parent = new; |
824 | |
825 | if (parent->child == first) { |
826 | parent->child = new; |
827 | } else { |
828 | next = parent->child; |
829 | while (next->sibling != first) |
830 | next = next->sibling; |
831 | next->sibling = new; |
832 | } |
833 | return NULL; |
834 | } |
835 | |
836 | /** |
837 | * insert_resource_conflict - Inserts resource in the resource tree |
838 | * @parent: parent of the new resource |
839 | * @new: new resource to insert |
840 | * |
841 | * Returns NULL on success, conflict resource if the resource can't be inserted. |
842 | * |
843 | * This function is equivalent to request_resource_conflict when no conflict |
844 | * happens. If a conflict happens, and the conflicting resources |
845 | * entirely fit within the range of the new resource, then the new |
846 | * resource is inserted and the conflicting resources become children of |
847 | * the new resource. |
848 | * |
849 | * This function is intended for producers of resources, such as FW modules |
850 | * and bus drivers. |
851 | */ |
852 | struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) |
853 | { |
854 | struct resource *conflict; |
855 | |
856 | write_lock(&resource_lock); |
857 | conflict = __insert_resource(parent, new); |
858 | write_unlock(&resource_lock); |
859 | return conflict; |
860 | } |
861 | |
862 | /** |
863 | * insert_resource - Inserts a resource in the resource tree |
864 | * @parent: parent of the new resource |
865 | * @new: new resource to insert |
866 | * |
867 | * Returns 0 on success, -EBUSY if the resource can't be inserted. |
868 | * |
869 | * This function is intended for producers of resources, such as FW modules |
870 | * and bus drivers. |
871 | */ |
872 | int insert_resource(struct resource *parent, struct resource *new) |
873 | { |
874 | struct resource *conflict; |
875 | |
876 | conflict = insert_resource_conflict(parent, new); |
877 | return conflict ? -EBUSY : 0; |
878 | } |
879 | EXPORT_SYMBOL_GPL(insert_resource); |
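/*
 * Illustrative sketch (not part of this file): a firmware driver publishing
 * a region it has discovered, letting any already-requested children be
 * re-parented under it. The address range below is hypothetical.
 *
 *        static struct resource fw_region = {
 *                .name  = "fw tables",
 *                .start = 0x7f000000,
 *                .end   = 0x7f0fffff,
 *                .flags = IORESOURCE_MEM,
 *        };
 *
 *        if (insert_resource(&iomem_resource, &fw_region))
 *                pr_warn("fw tables region could not be inserted\n");
 */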
880 | |
881 | /** |
882 | * insert_resource_expand_to_fit - Insert a resource into the resource tree |
883 | * @root: root resource descriptor |
884 | * @new: new resource to insert |
885 | * |
886 | * Insert a resource into the resource tree, possibly expanding it in order |
887 | * to make it encompass any conflicting resources. |
888 | */ |
889 | void insert_resource_expand_to_fit(struct resource *root, struct resource *new) |
890 | { |
891 | if (new->parent) |
892 | return; |
893 | |
894 | write_lock(&resource_lock); |
895 | for (;;) { |
896 | struct resource *conflict; |
897 | |
898 | conflict = __insert_resource(root, new); |
899 | if (!conflict) |
900 | break; |
901 | if (conflict == root) |
902 | break; |
903 | |
904 | /* Ok, expand resource to cover the conflict, then try again .. */ |
905 | if (conflict->start < new->start) |
906 | new->start = conflict->start; |
907 | if (conflict->end > new->end) |
908 | new->end = conflict->end; |
909 | |
910 | printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); |
911 | } |
912 | write_unlock(&resource_lock); |
913 | } |
914 | |
915 | /** |
916 | * remove_resource - Remove a resource in the resource tree |
917 | * @old: resource to remove |
918 | * |
919 | * Returns 0 on success, -EINVAL if the resource is not valid. |
920 | * |
921 | * This function removes a resource previously inserted by insert_resource() |
922 | * or insert_resource_conflict(), and moves the children (if any) up to |
923 | * where they were before. insert_resource() and insert_resource_conflict() |
924 | * insert a new resource, and move any conflicting resources down to the |
925 | * children of the new resource. |
926 | * |
927 | * insert_resource(), insert_resource_conflict() and remove_resource() are |
928 | * intended for producers of resources, such as FW modules and bus drivers. |
929 | */ |
930 | int remove_resource(struct resource *old) |
931 | { |
932 | int retval; |
933 | |
934 | write_lock(&resource_lock); |
935 | retval = __release_resource(old, false); |
936 | write_unlock(&resource_lock); |
937 | return retval; |
938 | } |
939 | EXPORT_SYMBOL_GPL(remove_resource); |
940 | |
941 | static int __adjust_resource(struct resource *res, resource_size_t start, |
942 | resource_size_t size) |
943 | { |
944 | struct resource *tmp, *parent = res->parent; |
945 | resource_size_t end = start + size - 1; |
946 | int result = -EBUSY; |
947 | |
948 | if (!parent) |
949 | goto skip; |
950 | |
951 | if ((start < parent->start) || (end > parent->end)) |
952 | goto out; |
953 | |
954 | if (res->sibling && (res->sibling->start <= end)) |
955 | goto out; |
956 | |
957 | tmp = parent->child; |
958 | if (tmp != res) { |
959 | while (tmp->sibling != res) |
960 | tmp = tmp->sibling; |
961 | if (start <= tmp->end) |
962 | goto out; |
963 | } |
964 | |
965 | skip: |
966 | for (tmp = res->child; tmp; tmp = tmp->sibling) |
967 | if ((tmp->start < start) || (tmp->end > end)) |
968 | goto out; |
969 | |
970 | res->start = start; |
971 | res->end = end; |
972 | result = 0; |
973 | |
974 | out: |
975 | return result; |
976 | } |
977 | |
978 | /** |
979 | * adjust_resource - modify a resource's start and size |
980 | * @res: resource to modify |
981 | * @start: new start value |
982 | * @size: new size |
983 | * |
984 | * Given an existing resource, change its start and size to match the |
985 | * arguments. Returns 0 on success, -EBUSY if it can't fit. |
986 | * Existing children of the resource are assumed to be immutable. |
987 | */ |
988 | int adjust_resource(struct resource *res, resource_size_t start, |
989 | resource_size_t size) |
990 | { |
991 | int result; |
992 | |
993 | write_lock(&resource_lock); |
994 | result = __adjust_resource(res, start, size); |
995 | write_unlock(&resource_lock); |
996 | return result; |
997 | } |
998 | EXPORT_SYMBOL(adjust_resource); |
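/*
 * Illustrative sketch (not part of this file): growing an already-inserted
 * region in place from 64 KiB to 128 KiB while keeping its start address.
 * foo_region and the sizes are hypothetical; the call fails with -EBUSY if
 * the new range would collide with the parent's bounds or a sibling.
 *
 *        err = adjust_resource(&foo_region, foo_region.start, 0x20000);
 */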
999 | |
1000 | static void __init __reserve_region_with_split(struct resource *root, |
1001 | resource_size_t start, resource_size_t end, |
1002 | const char *name) |
1003 | { |
1004 | struct resource *parent = root; |
1005 | struct resource *conflict; |
1006 | struct resource *res = alloc_resource(GFP_ATOMIC); |
1007 | struct resource *next_res = NULL; |
1008 | |
1009 | if (!res) |
1010 | return; |
1011 | |
1012 | res->name = name; |
1013 | res->start = start; |
1014 | res->end = end; |
1015 | res->flags = IORESOURCE_BUSY; |
1016 | res->desc = IORES_DESC_NONE; |
1017 | |
1018 | while (1) { |
1019 | |
1020 | conflict = __request_resource(parent, res); |
1021 | if (!conflict) { |
1022 | if (!next_res) |
1023 | break; |
1024 | res = next_res; |
1025 | next_res = NULL; |
1026 | continue; |
1027 | } |
1028 | |
1029 | /* conflict covered whole area */ |
1030 | if (conflict->start <= res->start && |
1031 | conflict->end >= res->end) { |
1032 | free_resource(res); |
1033 | WARN_ON(next_res); |
1034 | break; |
1035 | } |
1036 | |
1037 | /* failed, split and try again */ |
1038 | if (conflict->start > res->start) { |
1039 | end = res->end; |
1040 | res->end = conflict->start - 1; |
1041 | if (conflict->end < end) { |
1042 | next_res = alloc_resource(GFP_ATOMIC); |
1043 | if (!next_res) { |
1044 | free_resource(res); |
1045 | break; |
1046 | } |
1047 | next_res->name = name; |
1048 | next_res->start = conflict->end + 1; |
1049 | next_res->end = end; |
1050 | next_res->flags = IORESOURCE_BUSY; |
1051 | next_res->desc = IORES_DESC_NONE; |
1052 | } |
1053 | } else { |
1054 | res->start = conflict->end + 1; |
1055 | } |
1056 | } |
1057 | |
1058 | } |
1059 | |
1060 | void __init reserve_region_with_split(struct resource *root, |
1061 | resource_size_t start, resource_size_t end, |
1062 | const char *name) |
1063 | { |
1064 | int abort = 0; |
1065 | |
1066 | write_lock(&resource_lock); |
1067 | if (root->start > start || root->end < end) { |
1068 | pr_err("requested range [0x%llx-0x%llx] not in root %pr\n", |
1069 | (unsigned long long)start, (unsigned long long)end, |
1070 | root); |
1071 | if (start > root->end || end < root->start) |
1072 | abort = 1; |
1073 | else { |
1074 | if (end > root->end) |
1075 | end = root->end; |
1076 | if (start < root->start) |
1077 | start = root->start; |
1078 | pr_err("fixing request to [0x%llx-0x%llx]\n", |
1079 | (unsigned long long)start, |
1080 | (unsigned long long)end); |
1081 | } |
1082 | dump_stack(); |
1083 | } |
1084 | if (!abort) |
1085 | __reserve_region_with_split(root, start, end, name); |
1086 | write_unlock(&resource_lock); |
1087 | } |
1088 | |
1089 | /** |
1090 | * resource_alignment - calculate resource's alignment |
1091 | * @res: resource pointer |
1092 | * |
1093 | * Returns alignment on success, 0 (invalid alignment) on failure. |
1094 | */ |
1095 | resource_size_t resource_alignment(struct resource *res) |
1096 | { |
1097 | switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { |
1098 | case IORESOURCE_SIZEALIGN: |
1099 | return resource_size(res); |
1100 | case IORESOURCE_STARTALIGN: |
1101 | return res->start; |
1102 | default: |
1103 | return 0; |
1104 | } |
1105 | } |
1106 | |
1107 | /* |
1108 | * This is compatibility stuff for IO resources. |
1109 | * |
1110 | * Note how this, unlike the above, knows about |
1111 | * the IO flag meanings (busy etc). |
1112 | * |
1113 | * request_region creates a new busy region. |
1114 | * |
1115 | * release_region releases a matching busy region. |
1116 | */ |
1117 | |
1118 | static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); |
1119 | |
1120 | /** |
1121 | * __request_region - create a new busy resource region |
1122 | * @parent: parent resource descriptor |
1123 | * @start: resource start address |
1124 | * @n: resource region size |
1125 | * @name: reserving caller's ID string |
1126 | * @flags: IO resource flags |
1127 | */ |
1128 | struct resource * __request_region(struct resource *parent, |
1129 | resource_size_t start, resource_size_t n, |
1130 | const char *name, int flags) |
1131 | { |
1132 | DECLARE_WAITQUEUE(wait, current); |
1133 | struct resource *res = alloc_resource(GFP_KERNEL); |
1134 | |
1135 | if (!res) |
1136 | return NULL; |
1137 | |
1138 | res->name = name; |
1139 | res->start = start; |
1140 | res->end = start + n - 1; |
1141 | |
1142 | write_lock(&resource_lock); |
1143 | |
1144 | for (;;) { |
1145 | struct resource *conflict; |
1146 | |
1147 | res->flags = resource_type(parent) | resource_ext_type(parent); |
1148 | res->flags |= IORESOURCE_BUSY | flags; |
1149 | res->desc = parent->desc; |
1150 | |
1151 | conflict = __request_resource(parent, res); |
1152 | if (!conflict) |
1153 | break; |
1154 | if (conflict != parent) { |
1155 | if (!(conflict->flags & IORESOURCE_BUSY)) { |
1156 | parent = conflict; |
1157 | continue; |
1158 | } |
1159 | } |
1160 | if (conflict->flags & flags & IORESOURCE_MUXED) { |
1161 | add_wait_queue(&muxed_resource_wait, &wait); |
1162 | write_unlock(&resource_lock); |
1163 | set_current_state(TASK_UNINTERRUPTIBLE); |
1164 | schedule(); |
1165 | remove_wait_queue(&muxed_resource_wait, &wait); |
1166 | write_lock(&resource_lock); |
1167 | continue; |
1168 | } |
1169 | /* Uhhuh, that didn't work out.. */ |
1170 | free_resource(res); |
1171 | res = NULL; |
1172 | break; |
1173 | } |
1174 | write_unlock(&resource_lock); |
1175 | return res; |
1176 | } |
1177 | EXPORT_SYMBOL(__request_region); |
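/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * code through the request_region()/request_mem_region() wrappers declared
 * in <linux/ioport.h>, which pass &ioport_resource or &iomem_resource as
 * the parent. The base address, size and name below are hypothetical.
 *
 *        if (!request_mem_region(base, 0x1000, "foo-regs"))
 *                return -EBUSY;
 *
 *        release_mem_region(base, 0x1000);
 */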
1178 | |
1179 | /** |
1180 | * __release_region - release a previously reserved resource region |
1181 | * @parent: parent resource descriptor |
1182 | * @start: resource start address |
1183 | * @n: resource region size |
1184 | * |
1185 | * The described resource region must match a currently busy region. |
1186 | */ |
1187 | void __release_region(struct resource *parent, resource_size_t start, |
1188 | resource_size_t n) |
1189 | { |
1190 | struct resource **p; |
1191 | resource_size_t end; |
1192 | |
1193 | p = &parent->child; |
1194 | end = start + n - 1; |
1195 | |
1196 | write_lock(&resource_lock); |
1197 | |
1198 | for (;;) { |
1199 | struct resource *res = *p; |
1200 | |
1201 | if (!res) |
1202 | break; |
1203 | if (res->start <= start && res->end >= end) { |
1204 | if (!(res->flags & IORESOURCE_BUSY)) { |
1205 | p = &res->child; |
1206 | continue; |
1207 | } |
1208 | if (res->start != start || res->end != end) |
1209 | break; |
1210 | *p = res->sibling; |
1211 | write_unlock(&resource_lock); |
1212 | if (res->flags & IORESOURCE_MUXED) |
1213 | wake_up(&muxed_resource_wait); |
1214 | free_resource(res); |
1215 | return; |
1216 | } |
1217 | p = &res->sibling; |
1218 | } |
1219 | |
1220 | write_unlock(&resource_lock); |
1221 | |
1222 | printk(KERN_WARNING "Trying to free nonexistent resource " |
1223 | "<%016llx-%016llx>\n", (unsigned long long)start, |
1224 | (unsigned long long)end); |
1225 | } |
1226 | EXPORT_SYMBOL(__release_region); |
1227 | |
1228 | #ifdef CONFIG_MEMORY_HOTREMOVE |
1229 | /** |
1230 | * release_mem_region_adjustable - release a previously reserved memory region |
1231 | * @parent: parent resource descriptor |
1232 | * @start: resource start address |
1233 | * @size: resource region size |
1234 | * |
1235 | * This interface is intended for memory hot-delete. The requested region |
1236 | * is released from a currently busy memory resource. The requested region |
1237 | * must either match exactly or fit into a single busy resource entry. In |
1238 | * the latter case, the remaining resource is adjusted accordingly. |
1239 | * Existing children of the busy memory resource must be immutable in the |
1240 | * request. |
1241 | * |
1242 | * Note: |
1243 | * - Additional release conditions, such as overlapping region, can be |
1244 | * supported after they are confirmed as valid cases. |
1245 | * - When a busy memory resource gets split into two entries, the code |
1246 | * assumes that all children remain in the lower address entry for |
1247 | * simplicity. Enhance this logic when necessary. |
1248 | */ |
1249 | int release_mem_region_adjustable(struct resource *parent, |
1250 | resource_size_t start, resource_size_t size) |
1251 | { |
1252 | struct resource **p; |
1253 | struct resource *res; |
1254 | struct resource *new_res; |
1255 | resource_size_t end; |
1256 | int ret = -EINVAL; |
1257 | |
1258 | end = start + size - 1; |
1259 | if ((start < parent->start) || (end > parent->end)) |
1260 | return ret; |
1261 | |
1262 | /* The alloc_resource() result gets checked later */ |
1263 | new_res = alloc_resource(GFP_KERNEL); |
1264 | |
1265 | p = &parent->child; |
1266 | write_lock(&resource_lock); |
1267 | |
1268 | while ((res = *p)) { |
1269 | if (res->start >= end) |
1270 | break; |
1271 | |
1272 | /* look for the next resource if the request does not fit into this one */ |
1273 | if (res->start > start || res->end < end) { |
1274 | p = &res->sibling; |
1275 | continue; |
1276 | } |
1277 | |
1278 | if (!(res->flags & IORESOURCE_MEM)) |
1279 | break; |
1280 | |
1281 | if (!(res->flags & IORESOURCE_BUSY)) { |
1282 | p = &res->child; |
1283 | continue; |
1284 | } |
1285 | |
1286 | /* found the target resource; let's adjust accordingly */ |
1287 | if (res->start == start && res->end == end) { |
1288 | /* free the whole entry */ |
1289 | *p = res->sibling; |
1290 | free_resource(res); |
1291 | ret = 0; |
1292 | } else if (res->start == start && res->end != end) { |
1293 | /* adjust the start */ |
1294 | ret = __adjust_resource(res, end + 1, |
1295 | res->end - end); |
1296 | } else if (res->start != start && res->end == end) { |
1297 | /* adjust the end */ |
1298 | ret = __adjust_resource(res, res->start, |
1299 | start - res->start); |
1300 | } else { |
1301 | /* split into two entries */ |
1302 | if (!new_res) { |
1303 | ret = -ENOMEM; |
1304 | break; |
1305 | } |
1306 | new_res->name = res->name; |
1307 | new_res->start = end + 1; |
1308 | new_res->end = res->end; |
1309 | new_res->flags = res->flags; |
1310 | new_res->desc = res->desc; |
1311 | new_res->parent = res->parent; |
1312 | new_res->sibling = res->sibling; |
1313 | new_res->child = NULL; |
1314 | |
1315 | ret = __adjust_resource(res, res->start, |
1316 | start - res->start); |
1317 | if (ret) |
1318 | break; |
1319 | res->sibling = new_res; |
1320 | new_res = NULL; |
1321 | } |
1322 | |
1323 | break; |
1324 | } |
1325 | |
1326 | write_unlock(&resource_lock); |
1327 | free_resource(new_res); |
1328 | return ret; |
1329 | } |
1330 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
1331 | |
1332 | /* |
1333 | * Managed region resource |
1334 | */ |
1335 | static void devm_resource_release(struct device *dev, void *ptr) |
1336 | { |
1337 | struct resource **r = ptr; |
1338 | |
1339 | release_resource(*r); |
1340 | } |
1341 | |
1342 | /** |
1343 | * devm_request_resource() - request and reserve an I/O or memory resource |
1344 | * @dev: device for which to request the resource |
1345 | * @root: root of the resource tree from which to request the resource |
1346 | * @new: descriptor of the resource to request |
1347 | * |
1348 | * This is a device-managed version of request_resource(). There is usually |
1349 | * no need to release resources requested by this function explicitly since |
1350 | * that will be taken care of when the device is unbound from its driver. |
1351 | * If for some reason the resource needs to be released explicitly, because |
1352 | * of ordering issues for example, drivers must call devm_release_resource() |
1353 | * rather than the regular release_resource(). |
1354 | * |
1355 | * When a conflict is detected between any existing resources and the newly |
1356 | * requested resource, an error message will be printed. |
1357 | * |
1358 | * Returns 0 on success or a negative error code on failure. |
1359 | */ |
1360 | int devm_request_resource(struct device *dev, struct resource *root, |
1361 | struct resource *new) |
1362 | { |
1363 | struct resource *conflict, **ptr; |
1364 | |
1365 | ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL); |
1366 | if (!ptr) |
1367 | return -ENOMEM; |
1368 | |
1369 | *ptr = new; |
1370 | |
1371 | conflict = request_resource_conflict(root, new); |
1372 | if (conflict) { |
1373 | dev_err(dev, "resource collision: %pR conflicts with %s %pR\n", |
1374 | new, conflict->name, conflict); |
1375 | devres_free(ptr); |
1376 | return -EBUSY; |
1377 | } |
1378 | |
1379 | devres_add(dev, ptr); |
1380 | return 0; |
1381 | } |
1382 | EXPORT_SYMBOL(devm_request_resource); |
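/*
 * Illustrative sketch (not part of this file): typical use from a probe()
 * routine, where the resource is dropped automatically on driver unbind.
 * The structure, range and function names are hypothetical.
 *
 *        static struct resource foo_reg_space = {
 *                .name  = "foo registers",
 *                .start = 0xfed40000,
 *                .end   = 0xfed40fff,
 *                .flags = IORESOURCE_MEM,
 *        };
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                return devm_request_resource(&pdev->dev, &iomem_resource,
 *                                             &foo_reg_space);
 *        }
 */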
1383 | |
1384 | static int devm_resource_match(struct device *dev, void *res, void *data) |
1385 | { |
1386 | struct resource **ptr = res; |
1387 | |
1388 | return *ptr == data; |
1389 | } |
1390 | |
1391 | /** |
1392 | * devm_release_resource() - release a previously requested resource |
1393 | * @dev: device for which to release the resource |
1394 | * @new: descriptor of the resource to release |
1395 | * |
1396 | * Releases a resource previously requested using devm_request_resource(). |
1397 | */ |
1398 | void devm_release_resource(struct device *dev, struct resource *new) |
1399 | { |
1400 | WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match, |
1401 | new)); |
1402 | } |
1403 | EXPORT_SYMBOL(devm_release_resource); |
1404 | |
1405 | struct region_devres { |
1406 | struct resource *parent; |
1407 | resource_size_t start; |
1408 | resource_size_t n; |
1409 | }; |
1410 | |
1411 | static void devm_region_release(struct device *dev, void *res) |
1412 | { |
1413 | struct region_devres *this = res; |
1414 | |
1415 | __release_region(this->parent, this->start, this->n); |
1416 | } |
1417 | |
1418 | static int devm_region_match(struct device *dev, void *res, void *match_data) |
1419 | { |
1420 | struct region_devres *this = res, *match = match_data; |
1421 | |
1422 | return this->parent == match->parent && |
1423 | this->start == match->start && this->n == match->n; |
1424 | } |
1425 | |
1426 | struct resource * __devm_request_region(struct device *dev, |
1427 | struct resource *parent, resource_size_t start, |
1428 | resource_size_t n, const char *name) |
1429 | { |
1430 | struct region_devres *dr = NULL; |
1431 | struct resource *res; |
1432 | |
1433 | dr = devres_alloc(devm_region_release, sizeof(struct region_devres), |
1434 | GFP_KERNEL); |
1435 | if (!dr) |
1436 | return NULL; |
1437 | |
1438 | dr->parent = parent; |
1439 | dr->start = start; |
1440 | dr->n = n; |
1441 | |
1442 | res = __request_region(parent, start, n, name, 0); |
1443 | if (res) |
1444 | devres_add(dev, dr); |
1445 | else |
1446 | devres_free(dr); |
1447 | |
1448 | return res; |
1449 | } |
1450 | EXPORT_SYMBOL(__devm_request_region); |
1451 | |
1452 | void __devm_release_region(struct device *dev, struct resource *parent, |
1453 | resource_size_t start, resource_size_t n) |
1454 | { |
1455 | struct region_devres match_data = { parent, start, n }; |
1456 | |
1457 | __release_region(parent, start, n); |
1458 | WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, |
1459 | &match_data)); |
1460 | } |
1461 | EXPORT_SYMBOL(__devm_release_region); |
1462 | |
1463 | /* |
1464 | * Called from init/main.c to reserve IO ports. |
1465 | */ |
1466 | #define MAXRESERVE 4 |
1467 | static int __init reserve_setup(char *str) |
1468 | { |
1469 | static int reserved; |
1470 | static struct resource reserve[MAXRESERVE]; |
1471 | |
1472 | for (;;) { |
1473 | unsigned int io_start, io_num; |
1474 | int x = reserved; |
1475 | |
1476 | if (get_option (&str, &io_start) != 2) |
1477 | break; |
1478 | if (get_option (&str, &io_num) == 0) |
1479 | break; |
1480 | if (x < MAXRESERVE) { |
1481 | struct resource *res = reserve + x; |
1482 | res->name = "reserved"; |
1483 | res->start = io_start; |
1484 | res->end = io_start + io_num - 1; |
1485 | res->flags = IORESOURCE_BUSY; |
1486 | res->desc = IORES_DESC_NONE; |
1487 | res->child = NULL; |
1488 | if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0) |
1489 | reserved = x+1; |
1490 | } |
1491 | } |
1492 | return 1; |
1493 | } |
1494 | |
1495 | __setup("reserve=", reserve_setup); |
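/*
 * Illustrative sketch (not part of this file): the boot parameter parsed
 * above takes "start,size" pairs, e.g.
 *
 *        reserve=0x300,8
 *
 * which would mark I/O ports 0x300-0x307 busy; a start value of 0x10000 or
 * above is reserved against iomem_resource instead of ioport_resource.
 */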
1496 | |
1497 | /* |
1498 | * Check if the requested addr and size spans more than any slot in the |
1499 | * Check if the requested addr and size span more than any slot in the |
1500 | */ |
1501 | int iomem_map_sanity_check(resource_size_t addr, unsigned long size) |
1502 | { |
1503 | struct resource *p = &iomem_resource; |
1504 | int err = 0; |
1505 | loff_t l; |
1506 | |
1507 | read_lock(&resource_lock); |
1508 | for (p = p->child; p ; p = r_next(NULL, p, &l)) { |
1509 | /* |
1510 | * We can probably skip the resources without |
1511 | * IORESOURCE_IO attribute? |
1512 | */ |
1513 | if (p->start >= addr + size) |
1514 | continue; |
1515 | if (p->end < addr) |
1516 | continue; |
1517 | if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && |
1518 | PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) |
1519 | continue; |
1520 | /* |
1521 | * if a resource is "BUSY", it's not a hardware resource |
1522 | * but a driver mapping of such a resource; we don't want |
1523 | * to warn for those; some drivers legitimately map only |
1524 | * partial hardware resources. (example: vesafb) |
1525 | */ |
1526 | if (p->flags & IORESOURCE_BUSY) |
1527 | continue; |
1528 | |
1529 | printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n", |
1530 | (unsigned long long)addr, |
1531 | (unsigned long long)(addr + size - 1), |
1532 | p->name, p); |
1533 | err = -1; |
1534 | break; |
1535 | } |
1536 | read_unlock(&resource_lock); |
1537 | |
1538 | return err; |
1539 | } |
1540 | |
1541 | #ifdef CONFIG_STRICT_DEVMEM |
1542 | static int strict_iomem_checks = 1; |
1543 | #else |
1544 | static int strict_iomem_checks; |
1545 | #endif |
1546 | |
1547 | /* |
1548 | * Check if an address is reserved in the iomem resource tree; |
1549 | * returns 1 if reserved, 0 if not reserved. |
1550 | */ |
1551 | int iomem_is_exclusive(u64 addr) |
1552 | { |
1553 | struct resource *p = &iomem_resource; |
1554 | int err = 0; |
1555 | loff_t l; |
1556 | int size = PAGE_SIZE; |
1557 | |
1558 | if (!strict_iomem_checks) |
1559 | return 0; |
1560 | |
1561 | addr = addr & PAGE_MASK; |
1562 | |
1563 | read_lock(&resource_lock); |
1564 | for (p = p->child; p ; p = r_next(NULL, p, &l)) { |
1565 | /* |
1566 | * We can probably skip the resources without |
1567 | * IORESOURCE_IO attribute? |
1568 | */ |
1569 | if (p->start >= addr + size) |
1570 | break; |
1571 | if (p->end < addr) |
1572 | continue; |
1573 | /* |
1574 | * A resource is exclusive if IORESOURCE_EXCLUSIVE is set |
1575 | * or CONFIG_IO_STRICT_DEVMEM is enabled and the |
1576 | * resource is busy. |
1577 | */ |
1578 | if ((p->flags & IORESOURCE_BUSY) == 0) |
1579 | continue; |
1580 | if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) |
1581 | || p->flags & IORESOURCE_EXCLUSIVE) { |
1582 | err = 1; |
1583 | break; |
1584 | } |
1585 | } |
1586 | read_unlock(&resource_lock); |
1587 | |
1588 | return err; |
1589 | } |
1590 | |
1591 | struct resource_entry *resource_list_create_entry(struct resource *res, |
1592 | size_t extra_size) |
1593 | { |
1594 | struct resource_entry *entry; |
1595 | |
1596 | entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); |
1597 | if (entry) { |
1598 | INIT_LIST_HEAD(&entry->node); |
1599 | entry->res = res ? res : &entry->__res; |
1600 | } |
1601 | |
1602 | return entry; |
1603 | } |
1604 | EXPORT_SYMBOL(resource_list_create_entry); |
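/*
 * Illustrative sketch (not part of this file): building and walking a
 * resource list, assuming the resource_list_add_tail() and
 * resource_list_for_each_entry() helpers from <linux/resource_ext.h>.
 * The range and message are hypothetical.
 *
 *        LIST_HEAD(res_list);
 *        struct resource_entry *entry;
 *
 *        entry = resource_list_create_entry(NULL, 0);
 *        if (entry) {
 *                entry->res->start = 0x1000;
 *                entry->res->end   = 0x1fff;
 *                entry->res->flags = IORESOURCE_MEM;
 *                resource_list_add_tail(entry, &res_list);
 *        }
 *
 *        resource_list_for_each_entry(entry, &res_list)
 *                pr_info("entry %pR\n", entry->res);
 *
 *        resource_list_free(&res_list);
 */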
1605 | |
1606 | void resource_list_free(struct list_head *head) |
1607 | { |
1608 | struct resource_entry *entry, *tmp; |
1609 | |
1610 | list_for_each_entry_safe(entry, tmp, head, node) |
1611 | resource_list_destroy_entry(entry); |
1612 | } |
1613 | EXPORT_SYMBOL(resource_list_free); |
1614 | |
1615 | static int __init strict_iomem(char *str) |
1616 | { |
1617 | if (strstr(str, "relaxed")) |
1618 | strict_iomem_checks = 0; |
1619 | if (strstr(str, "strict")) |
1620 | strict_iomem_checks = 1; |
1621 | return 1; |
1622 | } |
1623 | |
1624 | __setup("iomem=", strict_iomem); |
1625 |