/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE 1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK (HASH_SIZE - 1)

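/*
 * Mapping types tracked by dma-debug. These values index the
 * type2name[] table below, so the two must be kept in sync.
 */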
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

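/* Number of stack frames stored per mapping when CONFIG_STACKTRACE is set */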
#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent, resource
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: dma address of the mapping
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head list;
	struct device *dev;
	int type;
	unsigned long pfn;
	size_t offset;
	u64 dev_addr;
	u64 size;
	int direction;
	int sg_call_ents;
	int sg_mapped_ents;
	enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent __read_mostly;
static struct dentry *global_disable_dent __read_mostly;
static struct dentry *error_count_dent __read_mostly;
static struct dentry *show_all_errors_dent __read_mostly;
static struct dentry *show_num_errors_dent __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, "%s %s: " format,			\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

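/*
 * Two entries match exactly if they refer to the same device address on
 * the same device. Used when an unmap must pair with a specific map.
 */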
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return (a->dev_addr == b->dev_addr) &&
	       (a->dev == b->dev);
}

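/*
 * A reference matches if its range lies completely inside the range of
 * an existing entry on the same device. Used by the sync checks, which
 * may legitimately operate on a sub-range of a mapping.
 */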
static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size == ref->size ? ++match_lvl : 0;
		entry->type == ref->type ? ++match_lvl : 0;
		entry->direction == ref->direction ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

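/*
 * A mapping containing the reference may start at a lower device address
 * and therefore hash into an earlier bucket. Walk backwards bucket by
 * bucket, bounded by the device's maximum segment size, until a
 * containing entry is found or the search range is exhausted.
 */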
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

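/* Convert a mapping's pfn/offset pair into a global cacheline index. */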
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

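/*
 * The overlap count of a cacheline is stored in the radix tree tags of
 * its slot: tag i contributes bit i, so RADIX_TREE_MAX_TAGS bits of
 * reference count (up to ACTIVE_CACHELINE_MAX_OVERLAP) are available
 * without any extra allocation.
 */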
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings. Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		global_disable = true;
		spin_unlock_irqrestore(&free_entries_lock, flags);
		pr_err("DMA-API: debugging out of memory - disabling\n");
		return NULL;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy into a temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 *
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  &global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

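/*
 * Count the active mappings of a device by walking the whole hash. Also
 * hands back one of the entries so the caller can include its details
 * in an error report.
 */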
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	local_irq_save(flags);

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock(&dma_entry_hash[i].lock);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock(&dma_entry_hash[i].lock);
	}

	local_irq_restore(flags);

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("DMA-API: debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

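/*
 * Validate an unmap/free request against the tracked mapping: the entry
 * must exist and must agree in size, type, CPU address, sg entry count
 * and direction. On success the entry is removed from the hash and
 * returned to the free list.
 */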
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "DMA-API: device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

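/*
 * Complain if a driver maps memory that belongs to a task's kernel
 * stack, whether the stack is in the direct mapping or vmalloc'ed
 * (CONFIG_VMAP_STACK).
 */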
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

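/* Check whether [addr, addr + len) intersects the region [start, end). */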
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

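/*
 * Validate a dma_sync_* call: the synced range must lie within a tracked
 * mapping, and the sync direction must be compatible with the direction
 * the memory was mapped with.
 */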
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_page;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_page,
		.dev = dev,
		.dev_addr = addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

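/*
 * Return the number of scatterlist elements that were actually mapped,
 * as recorded in the tracked entry; callers use this to bound their
 * unmap/sync loops.
 */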
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->pfn = page_to_pfn(virt_to_page(virt));
	entry->offset = (size_t) virt & ~PAGE_MASK;
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_coherent,
		.dev = dev,
		.pfn = page_to_pfn(virt_to_page(virt)),
		.offset = (size_t) virt & ~PAGE_MASK,
		.dev_addr = addr,
		.size = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = PHYS_PFN(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_resource,
		.dev = dev,
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = offset + size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = offset + size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);