/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
					IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
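
/*
 * Example (illustrative sketch, not from a specific caller): map a
 * firmware-described range as write-back cacheable memory, falling back
 * to a write-through mapping if WB cannot be established. 'phys_addr'
 * and 'len' are placeholder names:
 *
 *	void *base = memremap(phys_addr, len, MEMREMAP_WB | MEMREMAP_WT);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	...
 *	memunmap(base);
 */

/*
 * Mappings satisfied from the direct map (see try_ram_remap()) did not
 * consume new virtual address space, so only vmalloc-range addresses
 * handed out by the ioremap_*() family need to be unmapped here.
 */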
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
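
/*
 * Example (hypothetical usage sketch): unlike memremap(), the
 * device-managed wrapper returns an ERR_PTR() encoded error rather than
 * NULL, and the mapping is torn down automatically on driver detach:
 *
 *	void *base = devm_memremap(dev, phys_addr, len, MEMREMAP_WB);
 *
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */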

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
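
/*
 * struct page_map - tracking for one devm_memremap_pages() request
 * @res: private copy of the remapped "host memory" range
 * @ref: caller-provided per-cpu reference count (also reachable via @pgmap)
 * @pgmap: ZONE_DEVICE descriptor handed out via find_dev_pagemap()
 * @altmap: private copy of the optional memmap allocation descriptor
 */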
struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};
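
/*
 * References on ZONE_DEVICE pages pin the hosting device's percpu_ref:
 * each get takes a reference on page->pgmap->ref and each put releases
 * it, so the mapping cannot be torn down while any of its pages are in
 * use.
 */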
void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);
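
/*
 * Delete the radix tree entries for every section covered by @res,
 * stopping at @end_key. The normal teardown path passes -1 to release
 * everything; the devm_memremap_pages() error path passes the first key
 * that was not successfully inserted so only populated entries are
 * removed.
 */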
static void pgmap_radix_release(struct resource *res, resource_size_t end_key)
{
	resource_size_t key;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE) {
		if (key >= end_key)
			break;
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	}
	mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	lock_device_hotplug();
	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();
	unlock_device_hotplug();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key = 0, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	lock_device_hotplug();
	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, true);
	mem_hotplug_done();
	unlock_device_hotplug();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, key);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
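
/*
 * Example (hypothetical driver sketch): a pmem-style driver remaps its
 * aperture so that pfn_to_page() works against device memory. 'pmem'
 * and its members are illustrative names, not an existing API:
 *
 *	error = percpu_ref_init(&pmem->ref, pmem_release, 0, GFP_KERNEL);
 *	if (error)
 *		return error;
 *	pmem->virt_addr = devm_memremap_pages(dev, &pmem->res,
 *			&pmem->ref, NULL);
 *	if (IS_ERR(pmem->virt_addr))
 *		return PTR_ERR(pmem->virt_addr);
 *
 * Per note 1/ above, the caller must kill @ref and wait for it to go
 * dead before the devres release fires, otherwise the "page mapping is
 * still live" warning triggers.
 */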

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}
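
/*
 * Return memmap pages previously handed out against this altmap; the
 * release path warns if 'alloc' has not returned to zero.
 */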
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use by
	 * arch_{add|remove}_memory() when setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */