// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <[email protected]>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include "debug.h"

#define HASH_SIZE       16384ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)
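/*
 * Example: a mapping at dma address 0x12345678 lands in bucket
 * (0x12345678 >> HASH_FN_SHIFT) & HASH_FN_MASK == 0x11a2.
 */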

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
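/*
 * With 4 KiB pages this works out to a few dozen entries per expansion;
 * the exact count depends on sizeof(struct dma_debug_entry) and its
 * cacheline alignment.
 */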

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: DMA address of the mapping
 * @size: length of the mapping
 * @type: mapping type: single, sg, coherent or resource
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @paddr: physical start address of the mapping
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history
 */
struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	u64              dev_addr;
	u64              size;
	int              type;
	int              direction;
	int		 sg_call_ents;
	int		 sg_mapped_ents;
	phys_addr_t	 paddr;
	enum map_err_types  map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int	stack_len;
	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
	[DMA_NONE]		= "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    the worst case this will result in one warning more in
 *                    the system log than the user configured. This variable
 *                    is writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)
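/*
 * Example use, as in check_unmap() below:
 *
 *	err_printk(ref->dev, entry,
 *		   "device driver frees DMA memory with wrong function ...");
 *
 * error_count is always bumped; the WARN itself is subject to the driver
 * filter and to the all_errors/num_errors debugfs knobs.
 */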

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-26 of the address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the first match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

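/*
 * A mapping that contains @ref must start at the same or a lower dma
 * address, so on a miss we walk backwards one hash bucket at a time.
 * The walk is bounded by the bucket for dma address 0 and by HASH_SIZE
 * steps.
 */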
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{

	struct dma_debug_entry *entry, index = *ref;
	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);

	for (int i = 0; i < limit; i++) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings.  For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree.  In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
 * up right back in the DMA debugging code, leading to a deadlock.
 */
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

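/*
 * Example: with PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6 there are 64
 * cachelines per page, so paddr == 0x1234 yields cacheline number
 * (0x1 << 6) + (0x234 >> 6) == 72.
 */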
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
		(offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
}

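/*
 * The per-cacheline overlap count is persisted as a small binary number
 * spread across the RADIX_TREE_MAX_TAGS per-slot tags, so it saturates
 * at ACTIVE_CACHELINE_MAX_OVERLAP.
 */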
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/*
 * Dump mapping entries to the kernel log for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				cln = to_cacheline_number(entry);
				dev_info(entry->dev,
					 "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
					 type2name[entry->type], idx,
					 &entry->paddr, entry->dev_addr,
					 entry->size, &cln,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}
		spin_unlock_irqrestore(&bucket->lock, flags);

		cond_resched();
	}
}

/*
 * Dump mapping entries to user space via debugfs
 */
static int dump_show(struct seq_file *seq, void *v)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			cln = to_cacheline_number(entry);
			seq_printf(seq,
				   "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
				   dev_driver_string(entry->dev),
				   dev_name(entry->dev),
				   type2name[entry->type], idx,
				   &entry->paddr, entry->dev_addr,
				   entry->size, &cln,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);
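/*
 * With debugfs mounted at /sys/kernel/debug, the current mappings can be
 * read with:
 *
 *	cat /sys/kernel/debug/dma-api/dump
 */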

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}
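/*
 * Pages added here are never handed back to the page allocator: freed
 * entries only go back on free_entries, so the pool can grow but never
 * shrinks.
 */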

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/*
 * This should be called outside of free_entries_lock scope to avoid potential
 * deadlocks with serial consoles that use DMA.
 */
static void __dma_entry_alloc_check_leak(u32 nr_entries)
{
	u32 tmp = nr_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_entries,
			(nr_entries / nr_prealloc_entries));
	}
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	bool alloc_check_leak = false;
	struct dma_debug_entry *entry;
	unsigned long flags;
	u32 nr_entries;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		alloc_check_leak = true;
		nr_entries = nr_total_entries;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

	if (alloc_check_leak)
		__dma_entry_alloc_check_leak(nr_entries);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 *
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

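/*
 * Example: restrict error reporting to a single driver (the driver name
 * here is just an example):
 *
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *
 * Writing a string that does not start with an alphanumeric character
 * (e.g. a bare newline) switches the filter off again.
 */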
static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

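/*
 * On BUS_NOTIFY_UNBOUND_DRIVER, any entries still owned by the device
 * indicate mappings the driver leaked; one of them is reported as an
 * example.
 */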
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(const struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
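/*
 * Example kernel command line usage:
 *
 *	dma_debug=off            - disable the checks at boot
 *	dma_debug_entries=131072 - preallocate more entries than the
 *	                           PREALLOC_DMA_DEBUG_ENTRIES default
 */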
953cf65a0f6SChristoph Hellwig 
check_unmap(struct dma_debug_entry * ref)954cf65a0f6SChristoph Hellwig static void check_unmap(struct dma_debug_entry *ref)
955cf65a0f6SChristoph Hellwig {
956cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
957cf65a0f6SChristoph Hellwig 	struct hash_bucket *bucket;
958cf65a0f6SChristoph Hellwig 	unsigned long flags;
959cf65a0f6SChristoph Hellwig 
960cf65a0f6SChristoph Hellwig 	bucket = get_hash_bucket(ref, &flags);
961cf65a0f6SChristoph Hellwig 	entry = bucket_find_exact(bucket, ref);
962cf65a0f6SChristoph Hellwig 
963cf65a0f6SChristoph Hellwig 	if (!entry) {
964cf65a0f6SChristoph Hellwig 		/* must drop lock before calling dma_mapping_error */
96550f579a2SDan Carpenter 		put_hash_bucket(bucket, flags);
966cf65a0f6SChristoph Hellwig 
967cf65a0f6SChristoph Hellwig 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
968cf65a0f6SChristoph Hellwig 			err_printk(ref->dev, NULL,
969f737b095SRobin Murphy 				   "device driver tries to free an "
970cf65a0f6SChristoph Hellwig 				   "invalid DMA memory address\n");
971cf65a0f6SChristoph Hellwig 		} else {
972cf65a0f6SChristoph Hellwig 			err_printk(ref->dev, NULL,
973f737b095SRobin Murphy 				   "device driver tries to free DMA "
974cf65a0f6SChristoph Hellwig 				   "memory it has not allocated [device "
975cf65a0f6SChristoph Hellwig 				   "address=0x%016llx] [size=%llu bytes]\n",
976cf65a0f6SChristoph Hellwig 				   ref->dev_addr, ref->size);
977cf65a0f6SChristoph Hellwig 		}
978cf65a0f6SChristoph Hellwig 		return;
979cf65a0f6SChristoph Hellwig 	}
980cf65a0f6SChristoph Hellwig 
981cf65a0f6SChristoph Hellwig 	if (ref->size != entry->size) {
982f737b095SRobin Murphy 		err_printk(ref->dev, entry, "device driver frees "
983cf65a0f6SChristoph Hellwig 			   "DMA memory with different size "
984cf65a0f6SChristoph Hellwig 			   "[device address=0x%016llx] [map size=%llu bytes] "
985cf65a0f6SChristoph Hellwig 			   "[unmap size=%llu bytes]\n",
986cf65a0f6SChristoph Hellwig 			   ref->dev_addr, entry->size, ref->size);
987cf65a0f6SChristoph Hellwig 	}
988cf65a0f6SChristoph Hellwig 
989cf65a0f6SChristoph Hellwig 	if (ref->type != entry->type) {
990f737b095SRobin Murphy 		err_printk(ref->dev, entry, "device driver frees "
991cf65a0f6SChristoph Hellwig 			   "DMA memory with wrong function "
992cf65a0f6SChristoph Hellwig 			   "[device address=0x%016llx] [size=%llu bytes] "
993cf65a0f6SChristoph Hellwig 			   "[mapped as %s] [unmapped as %s]\n",
994cf65a0f6SChristoph Hellwig 			   ref->dev_addr, ref->size,
995cf65a0f6SChristoph Hellwig 			   type2name[entry->type], type2name[ref->type]);
9969d4f645aSChristoph Hellwig 	} else if (entry->type == dma_debug_coherent &&
9979d4f645aSChristoph Hellwig 		   ref->paddr != entry->paddr) {
998f737b095SRobin Murphy 		err_printk(ref->dev, entry, "device driver frees "
999cf65a0f6SChristoph Hellwig 			   "DMA memory with different CPU address "
1000cf65a0f6SChristoph Hellwig 			   "[device address=0x%016llx] [size=%llu bytes] "
10019d4f645aSChristoph Hellwig 			   "[cpu alloc address=%pa] "
10029d4f645aSChristoph Hellwig 			   "[cpu free address=%pa]\n",
1003cf65a0f6SChristoph Hellwig 			   ref->dev_addr, ref->size,
10049d4f645aSChristoph Hellwig 			   &entry->paddr,
10059d4f645aSChristoph Hellwig 			   &ref->paddr);
1006cf65a0f6SChristoph Hellwig 	}
1007cf65a0f6SChristoph Hellwig 
1008cf65a0f6SChristoph Hellwig 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1009cf65a0f6SChristoph Hellwig 	    ref->sg_call_ents != entry->sg_call_ents) {
1010f737b095SRobin Murphy 		err_printk(ref->dev, entry, "device driver frees "
1011cf65a0f6SChristoph Hellwig 			   "DMA sg list with different entry count "
1012cf65a0f6SChristoph Hellwig 			   "[map count=%d] [unmap count=%d]\n",
1013cf65a0f6SChristoph Hellwig 			   entry->sg_call_ents, ref->sg_call_ents);
1014cf65a0f6SChristoph Hellwig 	}
1015cf65a0f6SChristoph Hellwig 
1016cf65a0f6SChristoph Hellwig 	/*
1017cf65a0f6SChristoph Hellwig 	 * This may not be a bug in reality - but most implementations of the
1018cf65a0f6SChristoph Hellwig 	 * DMA API don't handle this properly, so check for it here
1019cf65a0f6SChristoph Hellwig 	 */
1020cf65a0f6SChristoph Hellwig 	if (ref->direction != entry->direction) {
1021f737b095SRobin Murphy 		err_printk(ref->dev, entry, "device driver frees "
1022cf65a0f6SChristoph Hellwig 			   "DMA memory with different direction "
1023cf65a0f6SChristoph Hellwig 			   "[device address=0x%016llx] [size=%llu bytes] "
1024cf65a0f6SChristoph Hellwig 			   "[mapped with %s] [unmapped with %s]\n",
1025cf65a0f6SChristoph Hellwig 			   ref->dev_addr, ref->size,
1026cf65a0f6SChristoph Hellwig 			   dir2name[entry->direction],
1027cf65a0f6SChristoph Hellwig 			   dir2name[ref->direction]);
1028cf65a0f6SChristoph Hellwig 	}
1029cf65a0f6SChristoph Hellwig 
1030cf65a0f6SChristoph Hellwig 	/*
1031cf65a0f6SChristoph Hellwig 	 * Drivers should use dma_mapping_error() to check the returned
1032cf65a0f6SChristoph Hellwig 	 * addresses of dma_map_single() and dma_map_page().
1033985098a0SMauro Carvalho Chehab 	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1034cf65a0f6SChristoph Hellwig 	 */
1035cf65a0f6SChristoph Hellwig 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1036cf65a0f6SChristoph Hellwig 		err_printk(ref->dev, entry,
1037f737b095SRobin Murphy 			   "device driver failed to check map error "
1038cf65a0f6SChristoph Hellwig 			   "[device address=0x%016llx] [size=%llu bytes] "
1039cf65a0f6SChristoph Hellwig 			   "[mapped as %s]\n",
1040cf65a0f6SChristoph Hellwig 			   ref->dev_addr, ref->size,
1041cf65a0f6SChristoph Hellwig 			   type2name[entry->type]);
1042cf65a0f6SChristoph Hellwig 	}
1043cf65a0f6SChristoph Hellwig 
1044cf65a0f6SChristoph Hellwig 	hash_bucket_del(entry);
104550f579a2SDan Carpenter 	put_hash_bucket(bucket, flags);
10467543c3e3SLevi Yun 
10477543c3e3SLevi Yun 	/*
10487543c3e3SLevi Yun 	 * Free the entry outside of bucket_lock to avoid ABBA deadlocks
10497543c3e3SLevi Yun 	 * between that and radix_lock.
10507543c3e3SLevi Yun 	 */
10517543c3e3SLevi Yun 	dma_entry_free(entry);
1052cf65a0f6SChristoph Hellwig }
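
/*
 * Illustrative (hypothetical) driver bug that check_unmap() flags: the
 * size passed at unmap time differs from the one recorded at map time,
 * so the "frees DMA memory with different size" report fires.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, 64, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, handle, 128, DMA_TO_DEVICE);	// wrong size
 *
 * The same machinery catches type (single/sg/coherent) and direction
 * mismatches between the map and unmap calls.
 */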
1053cf65a0f6SChristoph Hellwig 
1054cf65a0f6SChristoph Hellwig static void check_for_stack(struct device *dev,
1055cf65a0f6SChristoph Hellwig 			    struct page *page, size_t offset)
1056cf65a0f6SChristoph Hellwig {
1057cf65a0f6SChristoph Hellwig 	void *addr;
1058cf65a0f6SChristoph Hellwig 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1059cf65a0f6SChristoph Hellwig 
1060cf65a0f6SChristoph Hellwig 	if (!stack_vm_area) {
1061cf65a0f6SChristoph Hellwig 		/* Stack is direct-mapped. */
1062cf65a0f6SChristoph Hellwig 		if (PageHighMem(page))
1063cf65a0f6SChristoph Hellwig 			return;
1064cf65a0f6SChristoph Hellwig 		addr = page_address(page) + offset;
1065cf65a0f6SChristoph Hellwig 		if (object_is_on_stack(addr))
1066f737b095SRobin Murphy 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1067cf65a0f6SChristoph Hellwig 	} else {
1068cf65a0f6SChristoph Hellwig 		/* Stack is vmalloced. */
1069cf65a0f6SChristoph Hellwig 		int i;
1070cf65a0f6SChristoph Hellwig 
1071cf65a0f6SChristoph Hellwig 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
1072cf65a0f6SChristoph Hellwig 			if (page != stack_vm_area->pages[i])
1073cf65a0f6SChristoph Hellwig 				continue;
1074cf65a0f6SChristoph Hellwig 
1075cf65a0f6SChristoph Hellwig 			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1076f737b095SRobin Murphy 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1077cf65a0f6SChristoph Hellwig 			break;
1078cf65a0f6SChristoph Hellwig 		}
1079cf65a0f6SChristoph Hellwig 	}
1080cf65a0f6SChristoph Hellwig }
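
/*
 * Classic bug caught by check_for_stack() (hypothetical snippet): an
 * on-stack buffer handed to the DMA API.  With CONFIG_VMAP_STACK the
 * stack lives in vmalloc space, hence the two cases above.
 *
 *	char buf[64];						// on the stack
 *	dma_map_single(dev, buf, sizeof(buf), DMA_FROM_DEVICE);	// flagged
 */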
1081cf65a0f6SChristoph Hellwig 
1082cf65a0f6SChristoph Hellwig static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1083cf65a0f6SChristoph Hellwig {
10841d7db834SKefeng Wang 	if (memory_intersects(_stext, _etext, addr, len) ||
10851d7db834SKefeng Wang 	    memory_intersects(__start_rodata, __end_rodata, addr, len))
1086f737b095SRobin Murphy 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1087cf65a0f6SChristoph Hellwig }
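
/*
 * Hypothetical example for the check above: kernel text and rodata must
 * never be DMA targets, so mapping a pointer into rodata is reported.
 *
 *	static const char banner[] = "hello";	// lives in .rodata
 *	dma_map_single(dev, (void *)banner, sizeof(banner), DMA_TO_DEVICE);
 */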
1088cf65a0f6SChristoph Hellwig 
1089cf65a0f6SChristoph Hellwig static void check_sync(struct device *dev,
1090cf65a0f6SChristoph Hellwig 		       struct dma_debug_entry *ref,
1091cf65a0f6SChristoph Hellwig 		       bool to_cpu)
1092cf65a0f6SChristoph Hellwig {
1093cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1094cf65a0f6SChristoph Hellwig 	struct hash_bucket *bucket;
1095cf65a0f6SChristoph Hellwig 	unsigned long flags;
1096cf65a0f6SChristoph Hellwig 
1097cf65a0f6SChristoph Hellwig 	bucket = get_hash_bucket(ref, &flags);
1098cf65a0f6SChristoph Hellwig 
1099cf65a0f6SChristoph Hellwig 	entry = bucket_find_contain(&bucket, ref, &flags);
1100cf65a0f6SChristoph Hellwig 
1101cf65a0f6SChristoph Hellwig 	if (!entry) {
1102f737b095SRobin Murphy 		err_printk(dev, NULL, "device driver tries "
1103cf65a0f6SChristoph Hellwig 				"to sync DMA memory it has not allocated "
1104cf65a0f6SChristoph Hellwig 				"[device address=0x%016llx] [size=%llu bytes]\n",
1105cf65a0f6SChristoph Hellwig 				(unsigned long long)ref->dev_addr, ref->size);
1106cf65a0f6SChristoph Hellwig 		goto out;
1107cf65a0f6SChristoph Hellwig 	}
1108cf65a0f6SChristoph Hellwig 
1109cf65a0f6SChristoph Hellwig 	if (ref->size > entry->size) {
1110f737b095SRobin Murphy 		err_printk(dev, entry, "device driver syncs"
1111cf65a0f6SChristoph Hellwig 				" DMA memory outside allocated range "
1112cf65a0f6SChristoph Hellwig 				"[device address=0x%016llx] "
1113cf65a0f6SChristoph Hellwig 				"[allocation size=%llu bytes] "
1114cf65a0f6SChristoph Hellwig 				"[sync offset+size=%llu]\n",
1115cf65a0f6SChristoph Hellwig 				entry->dev_addr, entry->size,
1116cf65a0f6SChristoph Hellwig 				ref->size);
1117cf65a0f6SChristoph Hellwig 	}
1118cf65a0f6SChristoph Hellwig 
1119cf65a0f6SChristoph Hellwig 	if (entry->direction == DMA_BIDIRECTIONAL)
1120cf65a0f6SChristoph Hellwig 		goto out;
1121cf65a0f6SChristoph Hellwig 
1122cf65a0f6SChristoph Hellwig 	if (ref->direction != entry->direction) {
1123f737b095SRobin Murphy 		err_printk(dev, entry, "device driver syncs "
1124cf65a0f6SChristoph Hellwig 				"DMA memory with different direction "
1125cf65a0f6SChristoph Hellwig 				"[device address=0x%016llx] [size=%llu bytes] "
1126cf65a0f6SChristoph Hellwig 				"[mapped with %s] [synced with %s]\n",
1127cf65a0f6SChristoph Hellwig 				(unsigned long long)ref->dev_addr, entry->size,
1128cf65a0f6SChristoph Hellwig 				dir2name[entry->direction],
1129cf65a0f6SChristoph Hellwig 				dir2name[ref->direction]);
1130cf65a0f6SChristoph Hellwig 	}
1131cf65a0f6SChristoph Hellwig 
1132cf65a0f6SChristoph Hellwig 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1133cf65a0f6SChristoph Hellwig 		      !(ref->direction == DMA_TO_DEVICE))
1134f737b095SRobin Murphy 		err_printk(dev, entry, "device driver syncs "
1135cf65a0f6SChristoph Hellwig 				"device read-only DMA memory for cpu "
1136cf65a0f6SChristoph Hellwig 				"[device address=0x%016llx] [size=%llu bytes] "
1137cf65a0f6SChristoph Hellwig 				"[mapped with %s] [synced with %s]\n",
1138cf65a0f6SChristoph Hellwig 				(unsigned long long)ref->dev_addr, entry->size,
1139cf65a0f6SChristoph Hellwig 				dir2name[entry->direction],
1140cf65a0f6SChristoph Hellwig 				dir2name[ref->direction]);
1141cf65a0f6SChristoph Hellwig 
1142cf65a0f6SChristoph Hellwig 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1143cf65a0f6SChristoph Hellwig 		       !(ref->direction == DMA_FROM_DEVICE))
1144f737b095SRobin Murphy 		err_printk(dev, entry, "device driver syncs "
1145cf65a0f6SChristoph Hellwig 				"device write-only DMA memory to device "
1146cf65a0f6SChristoph Hellwig 				"[device address=0x%016llx] [size=%llu bytes] "
1147cf65a0f6SChristoph Hellwig 				"[mapped with %s] [synced with %s]\n",
1148cf65a0f6SChristoph Hellwig 				(unsigned long long)ref->dev_addr, entry->size,
1149cf65a0f6SChristoph Hellwig 				dir2name[entry->direction],
1150cf65a0f6SChristoph Hellwig 				dir2name[ref->direction]);
1151cf65a0f6SChristoph Hellwig 
1152cf65a0f6SChristoph Hellwig 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1153cf65a0f6SChristoph Hellwig 	    ref->sg_call_ents != entry->sg_call_ents) {
1154f737b095SRobin Murphy 		err_printk(ref->dev, entry, "device driver syncs "
1155cf65a0f6SChristoph Hellwig 			   "DMA sg list with different entry count "
1156cf65a0f6SChristoph Hellwig 			   "[map count=%d] [sync count=%d]\n",
1157cf65a0f6SChristoph Hellwig 			   entry->sg_call_ents, ref->sg_call_ents);
1158cf65a0f6SChristoph Hellwig 	}
1159cf65a0f6SChristoph Hellwig 
1160cf65a0f6SChristoph Hellwig out:
116150f579a2SDan Carpenter 	put_hash_bucket(bucket, flags);
1162cf65a0f6SChristoph Hellwig }
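
/*
 * Direction-mismatch sketch for check_sync() (hypothetical): a buffer
 * mapped DMA_TO_DEVICE is read-only for the device, so syncing it back
 * to the CPU as if it held device output triggers two of the reports
 * above ("different direction" and "device read-only ... for cpu").
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 */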
1163cf65a0f6SChristoph Hellwig 
1164cf65a0f6SChristoph Hellwig static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1165cf65a0f6SChristoph Hellwig {
1166cf65a0f6SChristoph Hellwig 	unsigned int max_seg = dma_get_max_seg_size(dev);
1167cf65a0f6SChristoph Hellwig 	u64 start, end, boundary = dma_get_seg_boundary(dev);
1168cf65a0f6SChristoph Hellwig 
1169cf65a0f6SChristoph Hellwig 	/*
1170cf65a0f6SChristoph Hellwig 	 * Either the driver forgot to set dma_parms appropriately, or
1171cf65a0f6SChristoph Hellwig 	 * whoever generated the list forgot to check them.
1172cf65a0f6SChristoph Hellwig 	 */
1173cf65a0f6SChristoph Hellwig 	if (sg->length > max_seg)
1174f737b095SRobin Murphy 		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1175cf65a0f6SChristoph Hellwig 			   sg->length, max_seg);
1176cf65a0f6SChristoph Hellwig 	/*
1177cf65a0f6SChristoph Hellwig 	 * In some cases this could potentially be the DMA API
1178cf65a0f6SChristoph Hellwig 	 * implementation's fault, but it would usually imply that
1179cf65a0f6SChristoph Hellwig 	 * the scatterlist was built inappropriately to begin with.
1180cf65a0f6SChristoph Hellwig 	 */
1181cf65a0f6SChristoph Hellwig 	start = sg_dma_address(sg);
1182cf65a0f6SChristoph Hellwig 	end = start + sg_dma_len(sg) - 1;
1183cf65a0f6SChristoph Hellwig 	if ((start ^ end) & ~boundary)
1184f737b095SRobin Murphy 		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1185cf65a0f6SChristoph Hellwig 			   start, end, boundary);
1186cf65a0f6SChristoph Hellwig }
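
/*
 * The limits tested above come from dev->dma_parms.  A driver whose
 * hardware cannot handle segments larger than 64K or crossing a 4K
 * boundary would advertise that roughly like this (illustrative
 * values):
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, SZ_4K - 1);
 */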
1187cf65a0f6SChristoph Hellwig 
118899c65fa7SStephen Boyd void debug_dma_map_single(struct device *dev, const void *addr,
118999c65fa7SStephen Boyd 			    unsigned long len)
119099c65fa7SStephen Boyd {
119199c65fa7SStephen Boyd 	if (unlikely(dma_debug_disabled()))
119299c65fa7SStephen Boyd 		return;
119399c65fa7SStephen Boyd 
119499c65fa7SStephen Boyd 	if (!virt_addr_valid(addr))
1195f737b095SRobin Murphy 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
119699c65fa7SStephen Boyd 			   addr, len);
119799c65fa7SStephen Boyd 
119899c65fa7SStephen Boyd 	if (is_vmalloc_addr(addr))
1199f737b095SRobin Murphy 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
120099c65fa7SStephen Boyd 			   addr, len);
120199c65fa7SStephen Boyd }
120299c65fa7SStephen Boyd EXPORT_SYMBOL(debug_dma_map_single);
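
/*
 * Sketch of a bug the checks above catch (hypothetical): vmalloc
 * addresses have no valid virt_to_page()/virt_to_phys() translation, so
 * they must not be fed to dma_map_single(); dma_alloc_coherent() or
 * page-wise mapping is required instead.
 *
 *	void *p = vmalloc(PAGE_SIZE);
 *	dma_map_single(dev, p, PAGE_SIZE, DMA_TO_DEVICE);	// flagged
 */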
120399c65fa7SStephen Boyd 
1204cf65a0f6SChristoph Hellwig void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1205c2bbf9d1SHamza Mahfooz 			size_t size, int direction, dma_addr_t dma_addr,
1206c2bbf9d1SHamza Mahfooz 			unsigned long attrs)
1207cf65a0f6SChristoph Hellwig {
1208cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1209cf65a0f6SChristoph Hellwig 
1210cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1211cf65a0f6SChristoph Hellwig 		return;
1212cf65a0f6SChristoph Hellwig 
1213cf65a0f6SChristoph Hellwig 	if (dma_mapping_error(dev, dma_addr))
1214cf65a0f6SChristoph Hellwig 		return;
1215cf65a0f6SChristoph Hellwig 
1216cf65a0f6SChristoph Hellwig 	entry = dma_entry_alloc();
1217cf65a0f6SChristoph Hellwig 	if (!entry)
1218cf65a0f6SChristoph Hellwig 		return;
1219cf65a0f6SChristoph Hellwig 
1220cf65a0f6SChristoph Hellwig 	entry->dev       = dev;
12212e05ea5cSChristoph Hellwig 	entry->type      = dma_debug_single;
1222*aef7ee76SFedor Pchelkin 	entry->paddr	 = page_to_phys(page) + offset;
1223cf65a0f6SChristoph Hellwig 	entry->dev_addr  = dma_addr;
1224cf65a0f6SChristoph Hellwig 	entry->size      = size;
1225cf65a0f6SChristoph Hellwig 	entry->direction = direction;
1226cf65a0f6SChristoph Hellwig 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
1227cf65a0f6SChristoph Hellwig 
1228cf65a0f6SChristoph Hellwig 	check_for_stack(dev, page, offset);
1229cf65a0f6SChristoph Hellwig 
1230cf65a0f6SChristoph Hellwig 	if (!PageHighMem(page)) {
1231cf65a0f6SChristoph Hellwig 		void *addr = page_address(page) + offset;
1232cf65a0f6SChristoph Hellwig 
1233cf65a0f6SChristoph Hellwig 		check_for_illegal_area(dev, addr, size);
1234cf65a0f6SChristoph Hellwig 	}
1235cf65a0f6SChristoph Hellwig 
1236c2bbf9d1SHamza Mahfooz 	add_dma_entry(entry, attrs);
1237cf65a0f6SChristoph Hellwig }
1238cf65a0f6SChristoph Hellwig 
1239cf65a0f6SChristoph Hellwig void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1240cf65a0f6SChristoph Hellwig {
1241cf65a0f6SChristoph Hellwig 	struct dma_debug_entry ref;
1242cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1243cf65a0f6SChristoph Hellwig 	struct hash_bucket *bucket;
1244cf65a0f6SChristoph Hellwig 	unsigned long flags;
1245cf65a0f6SChristoph Hellwig 
1246cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1247cf65a0f6SChristoph Hellwig 		return;
1248cf65a0f6SChristoph Hellwig 
1249cf65a0f6SChristoph Hellwig 	ref.dev = dev;
1250cf65a0f6SChristoph Hellwig 	ref.dev_addr = dma_addr;
1251cf65a0f6SChristoph Hellwig 	bucket = get_hash_bucket(&ref, &flags);
1252cf65a0f6SChristoph Hellwig 
1253cf65a0f6SChristoph Hellwig 	list_for_each_entry(entry, &bucket->list, list) {
1254cf65a0f6SChristoph Hellwig 		if (!exact_match(&ref, entry))
1255cf65a0f6SChristoph Hellwig 			continue;
1256cf65a0f6SChristoph Hellwig 
1257cf65a0f6SChristoph Hellwig 		/*
1258cf65a0f6SChristoph Hellwig 		 * The same physical address can be mapped multiple
1259cf65a0f6SChristoph Hellwig 		 * times. Without a hardware IOMMU this results in the
1260cf65a0f6SChristoph Hellwig 		 * same device addresses being put into the dma-debug
1261cf65a0f6SChristoph Hellwig 		 * hash multiple times too. This can result in false
1262cf65a0f6SChristoph Hellwig 		 * positives being reported. Therefore we implement a
1263cf65a0f6SChristoph Hellwig 		 * best-fit algorithm here which updates the first entry
1264cf65a0f6SChristoph Hellwig 		 * from the hash which fits the reference value and is
1265cf65a0f6SChristoph Hellwig 		 * not currently listed as being checked.
1266cf65a0f6SChristoph Hellwig 		 */
1267cf65a0f6SChristoph Hellwig 		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1268cf65a0f6SChristoph Hellwig 			entry->map_err_type = MAP_ERR_CHECKED;
1269cf65a0f6SChristoph Hellwig 			break;
1270cf65a0f6SChristoph Hellwig 		}
1271cf65a0f6SChristoph Hellwig 	}
1272cf65a0f6SChristoph Hellwig 
127350f579a2SDan Carpenter 	put_hash_bucket(bucket, flags);
1274cf65a0f6SChristoph Hellwig }
1275cf65a0f6SChristoph Hellwig EXPORT_SYMBOL(debug_dma_mapping_error);
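
/*
 * Driver-side pattern that satisfies the MAP_ERR_NOT_CHECKED tracking
 * (sketch): testing every mapping result funnels into the hook above,
 * which marks the corresponding entry MAP_ERR_CHECKED.
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */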
1276cf65a0f6SChristoph Hellwig 
1277479623fdSDesnes Nunes void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
12782e05ea5cSChristoph Hellwig 			  size_t size, int direction)
1279cf65a0f6SChristoph Hellwig {
1280cf65a0f6SChristoph Hellwig 	struct dma_debug_entry ref = {
12812e05ea5cSChristoph Hellwig 		.type           = dma_debug_single,
1282cf65a0f6SChristoph Hellwig 		.dev            = dev,
1283479623fdSDesnes Nunes 		.dev_addr       = dma_addr,
1284cf65a0f6SChristoph Hellwig 		.size           = size,
1285cf65a0f6SChristoph Hellwig 		.direction      = direction,
1286cf65a0f6SChristoph Hellwig 	};
1287cf65a0f6SChristoph Hellwig 
1288cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1289cf65a0f6SChristoph Hellwig 		return;
1290cf65a0f6SChristoph Hellwig 	check_unmap(&ref);
1291cf65a0f6SChristoph Hellwig }
1292cf65a0f6SChristoph Hellwig 
1293cf65a0f6SChristoph Hellwig void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1294c2bbf9d1SHamza Mahfooz 		      int nents, int mapped_ents, int direction,
1295c2bbf9d1SHamza Mahfooz 		      unsigned long attrs)
1296cf65a0f6SChristoph Hellwig {
1297cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1298cf65a0f6SChristoph Hellwig 	struct scatterlist *s;
1299cf65a0f6SChristoph Hellwig 	int i;
1300cf65a0f6SChristoph Hellwig 
1301cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1302cf65a0f6SChristoph Hellwig 		return;
1303cf65a0f6SChristoph Hellwig 
1304293d92cbSGerald Schaefer 	for_each_sg(sg, s, nents, i) {
1305293d92cbSGerald Schaefer 		check_for_stack(dev, sg_page(s), s->offset);
1306293d92cbSGerald Schaefer 		if (!PageHighMem(sg_page(s)))
1307293d92cbSGerald Schaefer 			check_for_illegal_area(dev, sg_virt(s), s->length);
1308293d92cbSGerald Schaefer 	}
1309293d92cbSGerald Schaefer 
1310cf65a0f6SChristoph Hellwig 	for_each_sg(sg, s, mapped_ents, i) {
1311cf65a0f6SChristoph Hellwig 		entry = dma_entry_alloc();
1312cf65a0f6SChristoph Hellwig 		if (!entry)
1313cf65a0f6SChristoph Hellwig 			return;
1314cf65a0f6SChristoph Hellwig 
1315cf65a0f6SChristoph Hellwig 		entry->type           = dma_debug_sg;
1316cf65a0f6SChristoph Hellwig 		entry->dev            = dev;
13179d4f645aSChristoph Hellwig 		entry->paddr	      = sg_phys(s);
1318cf65a0f6SChristoph Hellwig 		entry->size           = sg_dma_len(s);
1319cf65a0f6SChristoph Hellwig 		entry->dev_addr       = sg_dma_address(s);
1320cf65a0f6SChristoph Hellwig 		entry->direction      = direction;
1321cf65a0f6SChristoph Hellwig 		entry->sg_call_ents   = nents;
1322cf65a0f6SChristoph Hellwig 		entry->sg_mapped_ents = mapped_ents;
1323cf65a0f6SChristoph Hellwig 
1324cf65a0f6SChristoph Hellwig 		check_sg_segment(dev, s);
1325cf65a0f6SChristoph Hellwig 
1326c2bbf9d1SHamza Mahfooz 		add_dma_entry(entry, attrs);
1327cf65a0f6SChristoph Hellwig 	}
1328cf65a0f6SChristoph Hellwig }
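
/*
 * Sketch of the call this hook shadows: dma_map_sg() may merge
 * segments, so the returned count can be smaller than the 'nents' the
 * driver passed in; both values are recorded per entry above.
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	// program the hardware with 'count' segments only
 */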
1329cf65a0f6SChristoph Hellwig 
1330cf65a0f6SChristoph Hellwig static int get_nr_mapped_entries(struct device *dev,
1331cf65a0f6SChristoph Hellwig 				 struct dma_debug_entry *ref)
1332cf65a0f6SChristoph Hellwig {
1333cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1334cf65a0f6SChristoph Hellwig 	struct hash_bucket *bucket;
1335cf65a0f6SChristoph Hellwig 	unsigned long flags;
1336cf65a0f6SChristoph Hellwig 	int mapped_ents;
1337cf65a0f6SChristoph Hellwig 
1338cf65a0f6SChristoph Hellwig 	bucket       = get_hash_bucket(ref, &flags);
1339cf65a0f6SChristoph Hellwig 	entry        = bucket_find_exact(bucket, ref);
1340cf65a0f6SChristoph Hellwig 	mapped_ents  = 0;
1341cf65a0f6SChristoph Hellwig 
1342cf65a0f6SChristoph Hellwig 	if (entry)
1343cf65a0f6SChristoph Hellwig 		mapped_ents = entry->sg_mapped_ents;
134450f579a2SDan Carpenter 	put_hash_bucket(bucket, flags);
1345cf65a0f6SChristoph Hellwig 
1346cf65a0f6SChristoph Hellwig 	return mapped_ents;
1347cf65a0f6SChristoph Hellwig }
1348cf65a0f6SChristoph Hellwig 
1349cf65a0f6SChristoph Hellwig void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1350cf65a0f6SChristoph Hellwig 			int nelems, int dir)
1351cf65a0f6SChristoph Hellwig {
1352cf65a0f6SChristoph Hellwig 	struct scatterlist *s;
1353cf65a0f6SChristoph Hellwig 	int mapped_ents = 0, i;
1354cf65a0f6SChristoph Hellwig 
1355cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1356cf65a0f6SChristoph Hellwig 		return;
1357cf65a0f6SChristoph Hellwig 
1358cf65a0f6SChristoph Hellwig 	for_each_sg(sglist, s, nelems, i) {
1359cf65a0f6SChristoph Hellwig 
1360cf65a0f6SChristoph Hellwig 		struct dma_debug_entry ref = {
1361cf65a0f6SChristoph Hellwig 			.type           = dma_debug_sg,
1362cf65a0f6SChristoph Hellwig 			.dev            = dev,
13639d4f645aSChristoph Hellwig 			.paddr		= sg_phys(s),
1364cf65a0f6SChristoph Hellwig 			.dev_addr       = sg_dma_address(s),
1365cf65a0f6SChristoph Hellwig 			.size           = sg_dma_len(s),
1366cf65a0f6SChristoph Hellwig 			.direction      = dir,
1367cf65a0f6SChristoph Hellwig 			.sg_call_ents   = nelems,
1368cf65a0f6SChristoph Hellwig 		};
1369cf65a0f6SChristoph Hellwig 
1370cf65a0f6SChristoph Hellwig 		if (mapped_ents && i >= mapped_ents)
1371cf65a0f6SChristoph Hellwig 			break;
1372cf65a0f6SChristoph Hellwig 
1373cf65a0f6SChristoph Hellwig 		if (!i)
1374cf65a0f6SChristoph Hellwig 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1375cf65a0f6SChristoph Hellwig 
1376cf65a0f6SChristoph Hellwig 		check_unmap(&ref);
1377cf65a0f6SChristoph Hellwig 	}
1378cf65a0f6SChristoph Hellwig }
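
/*
 * Note the asymmetry handled above: dma_unmap_sg() must be called with
 * the original 'nents' given to dma_map_sg(), never the mapped count it
 * returned; the loop stops after 'mapped_ents' entries by itself.
 *
 *	count = dma_map_sg(dev, sgl, nents, dir);
 *	...
 *	dma_unmap_sg(dev, sgl, nents, dir);	// nents, not count
 */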
1379cf65a0f6SChristoph Hellwig 
1380*aef7ee76SFedor Pchelkin static phys_addr_t virt_to_paddr(void *virt)
1381*aef7ee76SFedor Pchelkin {
1382*aef7ee76SFedor Pchelkin 	struct page *page;
1383*aef7ee76SFedor Pchelkin 
1384*aef7ee76SFedor Pchelkin 	if (is_vmalloc_addr(virt))
1385*aef7ee76SFedor Pchelkin 		page = vmalloc_to_page(virt);
1386*aef7ee76SFedor Pchelkin 	else
1387*aef7ee76SFedor Pchelkin 		page = virt_to_page(virt);
1388*aef7ee76SFedor Pchelkin 
1389*aef7ee76SFedor Pchelkin 	return page_to_phys(page) + offset_in_page(virt);
1390*aef7ee76SFedor Pchelkin }
1391*aef7ee76SFedor Pchelkin 
1392cf65a0f6SChristoph Hellwig void debug_dma_alloc_coherent(struct device *dev, size_t size,
1393c2bbf9d1SHamza Mahfooz 			      dma_addr_t dma_addr, void *virt,
1394c2bbf9d1SHamza Mahfooz 			      unsigned long attrs)
1395cf65a0f6SChristoph Hellwig {
1396cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1397cf65a0f6SChristoph Hellwig 
1398cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1399cf65a0f6SChristoph Hellwig 		return;
1400cf65a0f6SChristoph Hellwig 
1401cf65a0f6SChristoph Hellwig 	if (unlikely(virt == NULL))
1402cf65a0f6SChristoph Hellwig 		return;
1403cf65a0f6SChristoph Hellwig 
1404cf65a0f6SChristoph Hellwig 	/* handle vmalloc and linear addresses */
1405cf65a0f6SChristoph Hellwig 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1406cf65a0f6SChristoph Hellwig 		return;
1407cf65a0f6SChristoph Hellwig 
1408cf65a0f6SChristoph Hellwig 	entry = dma_entry_alloc();
1409cf65a0f6SChristoph Hellwig 	if (!entry)
1410cf65a0f6SChristoph Hellwig 		return;
1411cf65a0f6SChristoph Hellwig 
1412cf65a0f6SChristoph Hellwig 	entry->type      = dma_debug_coherent;
1413cf65a0f6SChristoph Hellwig 	entry->dev       = dev;
1414*aef7ee76SFedor Pchelkin 	entry->paddr	 = virt_to_paddr(virt);
1415cf65a0f6SChristoph Hellwig 	entry->size      = size;
1416cf65a0f6SChristoph Hellwig 	entry->dev_addr  = dma_addr;
1417cf65a0f6SChristoph Hellwig 	entry->direction = DMA_BIDIRECTIONAL;
1418cf65a0f6SChristoph Hellwig 
1419c2bbf9d1SHamza Mahfooz 	add_dma_entry(entry, attrs);
1420cf65a0f6SChristoph Hellwig }
1421cf65a0f6SChristoph Hellwig 
1422cf65a0f6SChristoph Hellwig void debug_dma_free_coherent(struct device *dev, size_t size,
1423479623fdSDesnes Nunes 			 void *virt, dma_addr_t dma_addr)
1424cf65a0f6SChristoph Hellwig {
1425cf65a0f6SChristoph Hellwig 	struct dma_debug_entry ref = {
1426cf65a0f6SChristoph Hellwig 		.type           = dma_debug_coherent,
1427cf65a0f6SChristoph Hellwig 		.dev            = dev,
1428479623fdSDesnes Nunes 		.dev_addr       = dma_addr,
1429cf65a0f6SChristoph Hellwig 		.size           = size,
1430cf65a0f6SChristoph Hellwig 		.direction      = DMA_BIDIRECTIONAL,
1431cf65a0f6SChristoph Hellwig 	};
1432cf65a0f6SChristoph Hellwig 
1433cf65a0f6SChristoph Hellwig 	/* handle vmalloc and linear addresses */
1434cf65a0f6SChristoph Hellwig 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1435cf65a0f6SChristoph Hellwig 		return;
1436cf65a0f6SChristoph Hellwig 
1437*aef7ee76SFedor Pchelkin 	ref.paddr = virt_to_paddr(virt);
1438cf65a0f6SChristoph Hellwig 
1439cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1440cf65a0f6SChristoph Hellwig 		return;
1441cf65a0f6SChristoph Hellwig 
1442cf65a0f6SChristoph Hellwig 	check_unmap(&ref);
1443cf65a0f6SChristoph Hellwig }
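
/*
 * The coherent hooks pair up through check_unmap(): size, CPU address
 * and DMA handle must all match between allocation and free.
 * Hypothetical correct pairing:
 *
 *	virt = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, virt, handle);
 */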
1444cf65a0f6SChristoph Hellwig 
1445cf65a0f6SChristoph Hellwig void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1446c2bbf9d1SHamza Mahfooz 			    int direction, dma_addr_t dma_addr,
1447c2bbf9d1SHamza Mahfooz 			    unsigned long attrs)
1448cf65a0f6SChristoph Hellwig {
1449cf65a0f6SChristoph Hellwig 	struct dma_debug_entry *entry;
1450cf65a0f6SChristoph Hellwig 
1451cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1452cf65a0f6SChristoph Hellwig 		return;
1453cf65a0f6SChristoph Hellwig 
1454cf65a0f6SChristoph Hellwig 	entry = dma_entry_alloc();
1455cf65a0f6SChristoph Hellwig 	if (!entry)
1456cf65a0f6SChristoph Hellwig 		return;
1457cf65a0f6SChristoph Hellwig 
1458cf65a0f6SChristoph Hellwig 	entry->type		= dma_debug_resource;
1459cf65a0f6SChristoph Hellwig 	entry->dev		= dev;
14609d4f645aSChristoph Hellwig 	entry->paddr		= addr;
1461cf65a0f6SChristoph Hellwig 	entry->size		= size;
1462cf65a0f6SChristoph Hellwig 	entry->dev_addr		= dma_addr;
1463cf65a0f6SChristoph Hellwig 	entry->direction	= direction;
1464cf65a0f6SChristoph Hellwig 	entry->map_err_type	= MAP_ERR_NOT_CHECKED;
1465cf65a0f6SChristoph Hellwig 
1466c2bbf9d1SHamza Mahfooz 	add_dma_entry(entry, attrs);
1467cf65a0f6SChristoph Hellwig }
1468cf65a0f6SChristoph Hellwig 
1469cf65a0f6SChristoph Hellwig void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1470cf65a0f6SChristoph Hellwig 			      size_t size, int direction)
1471cf65a0f6SChristoph Hellwig {
1472cf65a0f6SChristoph Hellwig 	struct dma_debug_entry ref = {
1473cf65a0f6SChristoph Hellwig 		.type           = dma_debug_resource,
1474cf65a0f6SChristoph Hellwig 		.dev            = dev,
1475cf65a0f6SChristoph Hellwig 		.dev_addr       = dma_addr,
1476cf65a0f6SChristoph Hellwig 		.size           = size,
1477cf65a0f6SChristoph Hellwig 		.direction      = direction,
1478cf65a0f6SChristoph Hellwig 	};
1479cf65a0f6SChristoph Hellwig 
1480cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1481cf65a0f6SChristoph Hellwig 		return;
1482cf65a0f6SChristoph Hellwig 
1483cf65a0f6SChristoph Hellwig 	check_unmap(&ref);
1484cf65a0f6SChristoph Hellwig }
1485cf65a0f6SChristoph Hellwig 
1486cf65a0f6SChristoph Hellwig void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1487cf65a0f6SChristoph Hellwig 				   size_t size, int direction)
1488cf65a0f6SChristoph Hellwig {
1489cf65a0f6SChristoph Hellwig 	struct dma_debug_entry ref;
1490cf65a0f6SChristoph Hellwig 
1491cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1492cf65a0f6SChristoph Hellwig 		return;
1493cf65a0f6SChristoph Hellwig 
1494cf65a0f6SChristoph Hellwig 	ref.type         = dma_debug_single;
1495cf65a0f6SChristoph Hellwig 	ref.dev          = dev;
1496cf65a0f6SChristoph Hellwig 	ref.dev_addr     = dma_handle;
1497cf65a0f6SChristoph Hellwig 	ref.size         = size;
1498cf65a0f6SChristoph Hellwig 	ref.direction    = direction;
1499cf65a0f6SChristoph Hellwig 	ref.sg_call_ents = 0;
1500cf65a0f6SChristoph Hellwig 
1501cf65a0f6SChristoph Hellwig 	check_sync(dev, &ref, true);
1502cf65a0f6SChristoph Hellwig }
1503cf65a0f6SChristoph Hellwig 
1504cf65a0f6SChristoph Hellwig void debug_dma_sync_single_for_device(struct device *dev,
1505cf65a0f6SChristoph Hellwig 				      dma_addr_t dma_handle, size_t size,
1506cf65a0f6SChristoph Hellwig 				      int direction)
1507cf65a0f6SChristoph Hellwig {
1508cf65a0f6SChristoph Hellwig 	struct dma_debug_entry ref;
1509cf65a0f6SChristoph Hellwig 
1510cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1511cf65a0f6SChristoph Hellwig 		return;
1512cf65a0f6SChristoph Hellwig 
1513cf65a0f6SChristoph Hellwig 	ref.type         = dma_debug_single;
1514cf65a0f6SChristoph Hellwig 	ref.dev          = dev;
1515cf65a0f6SChristoph Hellwig 	ref.dev_addr     = dma_handle;
1516cf65a0f6SChristoph Hellwig 	ref.size         = size;
1517cf65a0f6SChristoph Hellwig 	ref.direction    = direction;
1518cf65a0f6SChristoph Hellwig 	ref.sg_call_ents = 0;
1519cf65a0f6SChristoph Hellwig 
1520cf65a0f6SChristoph Hellwig 	check_sync(dev, &ref, false);
1521cf65a0f6SChristoph Hellwig }
1522cf65a0f6SChristoph Hellwig 
1523cf65a0f6SChristoph Hellwig void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1524cf65a0f6SChristoph Hellwig 			       int nelems, int direction)
1525cf65a0f6SChristoph Hellwig {
1526cf65a0f6SChristoph Hellwig 	struct scatterlist *s;
1527cf65a0f6SChristoph Hellwig 	int mapped_ents = 0, i;
1528cf65a0f6SChristoph Hellwig 
1529cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1530cf65a0f6SChristoph Hellwig 		return;
1531cf65a0f6SChristoph Hellwig 
1532cf65a0f6SChristoph Hellwig 	for_each_sg(sg, s, nelems, i) {
1533cf65a0f6SChristoph Hellwig 
1534cf65a0f6SChristoph Hellwig 		struct dma_debug_entry ref = {
1535cf65a0f6SChristoph Hellwig 			.type           = dma_debug_sg,
1536cf65a0f6SChristoph Hellwig 			.dev            = dev,
15379d4f645aSChristoph Hellwig 			.paddr		= sg_phys(s),
1538cf65a0f6SChristoph Hellwig 			.dev_addr       = sg_dma_address(s),
1539cf65a0f6SChristoph Hellwig 			.size           = sg_dma_len(s),
1540cf65a0f6SChristoph Hellwig 			.direction      = direction,
1541cf65a0f6SChristoph Hellwig 			.sg_call_ents   = nelems,
1542cf65a0f6SChristoph Hellwig 		};
1543cf65a0f6SChristoph Hellwig 
1544cf65a0f6SChristoph Hellwig 		if (!i)
1545cf65a0f6SChristoph Hellwig 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1546cf65a0f6SChristoph Hellwig 
1547cf65a0f6SChristoph Hellwig 		if (i >= mapped_ents)
1548cf65a0f6SChristoph Hellwig 			break;
1549cf65a0f6SChristoph Hellwig 
1550cf65a0f6SChristoph Hellwig 		check_sync(dev, &ref, true);
1551cf65a0f6SChristoph Hellwig 	}
1552cf65a0f6SChristoph Hellwig }
1553cf65a0f6SChristoph Hellwig 
1554cf65a0f6SChristoph Hellwig void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1555cf65a0f6SChristoph Hellwig 				  int nelems, int direction)
1556cf65a0f6SChristoph Hellwig {
1557cf65a0f6SChristoph Hellwig 	struct scatterlist *s;
1558cf65a0f6SChristoph Hellwig 	int mapped_ents = 0, i;
1559cf65a0f6SChristoph Hellwig 
1560cf65a0f6SChristoph Hellwig 	if (unlikely(dma_debug_disabled()))
1561cf65a0f6SChristoph Hellwig 		return;
1562cf65a0f6SChristoph Hellwig 
1563cf65a0f6SChristoph Hellwig 	for_each_sg(sg, s, nelems, i) {
1564cf65a0f6SChristoph Hellwig 
1565cf65a0f6SChristoph Hellwig 		struct dma_debug_entry ref = {
1566cf65a0f6SChristoph Hellwig 			.type           = dma_debug_sg,
1567cf65a0f6SChristoph Hellwig 			.dev            = dev,
15689d4f645aSChristoph Hellwig 			.paddr		= sg_phys(s),
1569cf65a0f6SChristoph Hellwig 			.dev_addr       = sg_dma_address(s),
1570cf65a0f6SChristoph Hellwig 			.size           = sg_dma_len(s),
1571cf65a0f6SChristoph Hellwig 			.direction      = direction,
1572cf65a0f6SChristoph Hellwig 			.sg_call_ents   = nelems,
1573cf65a0f6SChristoph Hellwig 		};
1574cf65a0f6SChristoph Hellwig 		if (!i)
1575cf65a0f6SChristoph Hellwig 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1576cf65a0f6SChristoph Hellwig 
1577cf65a0f6SChristoph Hellwig 		if (i >= mapped_ents)
1578cf65a0f6SChristoph Hellwig 			break;
1579cf65a0f6SChristoph Hellwig 
1580cf65a0f6SChristoph Hellwig 		check_sync(dev, &ref, false);
1581cf65a0f6SChristoph Hellwig 	}
1582cf65a0f6SChristoph Hellwig }
1583cf65a0f6SChristoph Hellwig 
1584cf65a0f6SChristoph Hellwig static int __init dma_debug_driver_setup(char *str)
1585cf65a0f6SChristoph Hellwig {
1586cf65a0f6SChristoph Hellwig 	int i;
1587cf65a0f6SChristoph Hellwig 
1588cf65a0f6SChristoph Hellwig 	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1589cf65a0f6SChristoph Hellwig 		current_driver_name[i] = *str;
1590cf65a0f6SChristoph Hellwig 		if (*str == 0)
1591cf65a0f6SChristoph Hellwig 			break;
1592cf65a0f6SChristoph Hellwig 	}
1593cf65a0f6SChristoph Hellwig 
1594cf65a0f6SChristoph Hellwig 	if (current_driver_name[0])
1595f737b095SRobin Murphy 		pr_info("enable driver filter for driver [%s]\n",
1596cf65a0f6SChristoph Hellwig 			current_driver_name);
1597cf65a0f6SChristoph Hellwig 
1598cf65a0f6SChristoph Hellwig 
1599cf65a0f6SChristoph Hellwig 	return 1;
1600cf65a0f6SChristoph Hellwig }
1601cf65a0f6SChristoph Hellwig __setup("dma_debug_driver=", dma_debug_driver_setup);
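
/*
 * Usage sketch: booting with e.g.
 *
 *	dma_debug_driver=e1000e
 *
 * limits error reports to the named driver ("e1000e" is just an
 * example); the filter can also be changed at runtime through the
 * dma-api/driver_filter file in debugfs.
 */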
1602