debug.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2008 Advanced Micro Devices, Inc.
  4. *
  5. * Author: Joerg Roedel <joerg.roedel@amd.com>
  6. */
  7. #define pr_fmt(fmt) "DMA-API: " fmt
  8. #include <linux/sched/task_stack.h>
  9. #include <linux/scatterlist.h>
  10. #include <linux/dma-map-ops.h>
  11. #include <linux/sched/task.h>
  12. #include <linux/stacktrace.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/vmalloc.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/export.h>
  18. #include <linux/device.h>
  19. #include <linux/types.h>
  20. #include <linux/sched.h>
  21. #include <linux/ctype.h>
  22. #include <linux/list.h>
  23. #include <linux/slab.h>
  24. #include <asm/sections.h>
  25. #include "debug.h"
  26. #define HASH_SIZE 16384ULL
  27. #define HASH_FN_SHIFT 13
  28. #define HASH_FN_MASK (HASH_SIZE - 1)
  29. #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
  30. /* If the pool runs out, add this many new entries at once */
  31. #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
  32. enum {
  33. dma_debug_single,
  34. dma_debug_sg,
  35. dma_debug_coherent,
  36. dma_debug_resource,
  37. };
  38. enum map_err_types {
  39. MAP_ERR_CHECK_NOT_APPLICABLE,
  40. MAP_ERR_NOT_CHECKED,
  41. MAP_ERR_CHECKED,
  42. };
  43. #define DMA_DEBUG_STACKTRACE_ENTRIES 5
  44. /**
  45. * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
  46. * @list: node on pre-allocated free_entries list
  47. * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
* @dev_addr: dma address returned by the mapping call
  48. * @size: length of the mapping
  49. * @type: single, page, sg, coherent
  50. * @direction: enum dma_data_direction
  51. * @sg_call_ents: 'nents' from dma_map_sg
  52. * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
  53. * @pfn: page frame of the start address
  54. * @offset: offset of mapping relative to pfn
  55. * @map_err_type: track whether dma_mapping_error() was checked
  56. * @stack_len: number of backtrace entries in @stack_entries
* @stack_entries: stack trace of the call that created the mapping
  57. */
  58. struct dma_debug_entry {
  59. struct list_head list;
  60. struct device *dev;
  61. u64 dev_addr;
  62. u64 size;
  63. int type;
  64. int direction;
  65. int sg_call_ents;
  66. int sg_mapped_ents;
  67. unsigned long pfn;
  68. size_t offset;
  69. enum map_err_types map_err_type;
  70. #ifdef CONFIG_STACKTRACE
  71. unsigned int stack_len;
  72. unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
  73. #endif
  74. } ____cacheline_aligned_in_smp;
  75. typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
  76. struct hash_bucket {
  77. struct list_head list;
  78. spinlock_t lock;
  79. };
  80. /* Hash list to save the allocated dma addresses */
  81. static struct hash_bucket dma_entry_hash[HASH_SIZE];
  82. /* List of pre-allocated dma_debug_entry's */
  83. static LIST_HEAD(free_entries);
  84. /* Lock for the list above */
  85. static DEFINE_SPINLOCK(free_entries_lock);
  86. /* Global disable flag - will be set in case of an error */
  87. static bool global_disable __read_mostly;
  88. /* Early initialization disable flag, set at the end of dma_debug_init */
  89. static bool dma_debug_initialized __read_mostly;
  90. static inline bool dma_debug_disabled(void)
  91. {
  92. return global_disable || !dma_debug_initialized;
  93. }
  94. /* Global error count */
  95. static u32 error_count;
  96. /* Global error show enable */
  97. static u32 show_all_errors __read_mostly;
  98. /* Number of errors to show */
  99. static u32 show_num_errors = 1;
  100. static u32 num_free_entries;
  101. static u32 min_free_entries;
  102. static u32 nr_total_entries;
  103. /* number of preallocated entries requested by kernel cmdline */
  104. static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
  105. /* per-driver filter related state */
  106. #define NAME_MAX_LEN 64
  107. static char current_driver_name[NAME_MAX_LEN] __read_mostly;
  108. static struct device_driver *current_driver __read_mostly;
  109. static DEFINE_RWLOCK(driver_name_lock);
  110. static const char *const maperr2str[] = {
  111. [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
  112. [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
  113. [MAP_ERR_CHECKED] = "dma map error checked",
  114. };
  115. static const char *type2name[] = {
  116. [dma_debug_single] = "single",
  117. [dma_debug_sg] = "scatter-gather",
  118. [dma_debug_coherent] = "coherent",
  119. [dma_debug_resource] = "resource",
  120. };
  121. static const char *dir2name[] = {
  122. [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
  123. [DMA_TO_DEVICE] = "DMA_TO_DEVICE",
  124. [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
  125. [DMA_NONE] = "DMA_NONE",
  126. };
  127. /*
  128. * The access to some variables in this macro is racy. We can't use atomic_t
  129. * here because all these variables are exported to debugfs. Some of them are
  130. * even writable. This is also the reason why a lock won't help much. But anyway,
  131. * the races are no big deal. Here is why:
  132. *
  133. * error_count: the addition is racy, but the worst thing that can happen is
  134. * that we don't count some errors
  135. * show_num_errors: the subtraction is racy. Also no big deal because in
  136. * worst case this will result in one warning more in the
  137. * system log than the user configured. This variable is
  138. * writable via debugfs.
  139. */
  140. static inline void dump_entry_trace(struct dma_debug_entry *entry)
  141. {
  142. #ifdef CONFIG_STACKTRACE
  143. if (entry) {
  144. pr_warn("Mapped at:\n");
  145. stack_trace_print(entry->stack_entries, entry->stack_len, 0);
  146. }
  147. #endif
  148. }
  149. static bool driver_filter(struct device *dev)
  150. {
  151. struct device_driver *drv;
  152. unsigned long flags;
  153. bool ret;
  154. /* driver filter off */
  155. if (likely(!current_driver_name[0]))
  156. return true;
  157. /* driver filter on and initialized */
  158. if (current_driver && dev && dev->driver == current_driver)
  159. return true;
  160. /* driver filter on, but we can't filter on a NULL device... */
  161. if (!dev)
  162. return false;
  163. if (current_driver || !current_driver_name[0])
  164. return false;
  165. /* driver filter on but not yet initialized */
  166. drv = dev->driver;
  167. if (!drv)
  168. return false;
  169. /* lock to protect against change of current_driver_name */
  170. read_lock_irqsave(&driver_name_lock, flags);
  171. ret = false;
  172. if (drv->name &&
  173. strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
  174. current_driver = drv;
  175. ret = true;
  176. }
  177. read_unlock_irqrestore(&driver_name_lock, flags);
  178. return ret;
  179. }
  180. #define err_printk(dev, entry, format, arg...) do { \
  181. error_count += 1; \
  182. if (driver_filter(dev) && \
  183. (show_all_errors || show_num_errors > 0)) { \
  184. WARN(1, pr_fmt("%s %s: ") format, \
  185. dev ? dev_driver_string(dev) : "NULL", \
  186. dev ? dev_name(dev) : "NULL", ## arg); \
  187. dump_entry_trace(entry); \
  188. } \
  189. if (!show_all_errors && show_num_errors > 0) \
  190. show_num_errors -= 1; \
  191. } while (0)
  192. /*
  193. * Hash related functions
  194. *
  195. * Every DMA-API request is saved into a struct dma_debug_entry. To
  196. * have quick access to these structs they are stored into a hash.
  197. */
  198. static int hash_fn(struct dma_debug_entry *entry)
  199. {
  200. /*
  201. * Hash function is based on the dma address.
  202. * We use bits 13-26 here as the index into the hash
  203. */
  204. return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
  205. }
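/*
 * For illustration (the numbers follow from the constants above): with
 * HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3fff, a mapping at
 * dev_addr 0x12345678 lands in bucket
 * (0x12345678 >> 13) & 0x3fff == 0x11a2,
 * so all dma addresses within the same 8 KiB-aligned window share one
 * bucket. That is what lets bucket_find_contain() below step backwards one
 * bucket at a time when searching for a mapping that merely contains the
 * reference address.
 */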
  206. /*
  207. * Request exclusive access to a hash bucket for a given dma_debug_entry.
  208. */
  209. static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
  210. unsigned long *flags)
  211. __acquires(&dma_entry_hash[idx].lock)
  212. {
  213. int idx = hash_fn(entry);
  214. unsigned long __flags;
  215. spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
  216. *flags = __flags;
  217. return &dma_entry_hash[idx];
  218. }
  219. /*
  220. * Give up exclusive access to the hash bucket
  221. */
  222. static void put_hash_bucket(struct hash_bucket *bucket,
  223. unsigned long flags)
  224. __releases(&bucket->lock)
  225. {
  226. spin_unlock_irqrestore(&bucket->lock, flags);
  227. }
  228. static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
  229. {
  230. return ((a->dev_addr == b->dev_addr) &&
  231. (a->dev == b->dev)) ? true : false;
  232. }
  233. static bool containing_match(struct dma_debug_entry *a,
  234. struct dma_debug_entry *b)
  235. {
  236. if (a->dev != b->dev)
  237. return false;
  238. if ((b->dev_addr <= a->dev_addr) &&
  239. ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
  240. return true;
  241. return false;
  242. }
  243. /*
  244. * Search a given entry in the hash bucket list
  245. */
  246. static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
  247. struct dma_debug_entry *ref,
  248. match_fn match)
  249. {
  250. struct dma_debug_entry *entry, *ret = NULL;
  251. int matches = 0, match_lvl, last_lvl = -1;
  252. list_for_each_entry(entry, &bucket->list, list) {
  253. if (!match(ref, entry))
  254. continue;
  255. /*
  256. * Some drivers map the same physical address multiple
  257. * times. Without a hardware IOMMU this results in the
  258. * same device addresses being put into the dma-debug
  259. * hash multiple times too. This can result in false
  260. * positives being reported. Therefore we implement a
  261. * best-fit algorithm here which returns the entry from
  262. * the hash which fits best to the reference value
  263. * instead of the first-fit.
  264. */
  265. matches += 1;
  266. match_lvl = 0;
  267. entry->size == ref->size ? ++match_lvl : 0;
  268. entry->type == ref->type ? ++match_lvl : 0;
  269. entry->direction == ref->direction ? ++match_lvl : 0;
  270. entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
  271. if (match_lvl == 4) {
  272. /* perfect-fit - return the result */
  273. return entry;
  274. } else if (match_lvl > last_lvl) {
  275. /*
  276. * We found an entry that fits better than the
  277. * previous one, or it is the first match.
  278. */
  279. last_lvl = match_lvl;
  280. ret = entry;
  281. }
  282. }
  283. /*
  284. * If we have multiple matches but no perfect-fit, just return
  285. * NULL.
  286. */
  287. ret = (matches == 1) ? ret : NULL;
  288. return ret;
  289. }
  290. static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
  291. struct dma_debug_entry *ref)
  292. {
  293. return __hash_bucket_find(bucket, ref, exact_match);
  294. }
  295. static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
  296. struct dma_debug_entry *ref,
  297. unsigned long *flags)
  298. {
  299. unsigned int max_range = dma_get_max_seg_size(ref->dev);
  300. struct dma_debug_entry *entry, index = *ref;
  301. unsigned int range = 0;
  302. while (range <= max_range) {
  303. entry = __hash_bucket_find(*bucket, ref, containing_match);
  304. if (entry)
  305. return entry;
  306. /*
  307. * Nothing found, go back a hash bucket
  308. */
  309. put_hash_bucket(*bucket, *flags);
  310. range += (1 << HASH_FN_SHIFT);
  311. index.dev_addr -= (1 << HASH_FN_SHIFT);
  312. *bucket = get_hash_bucket(&index, flags);
  313. }
  314. return NULL;
  315. }
  316. /*
  317. * Add an entry to a hash bucket
  318. */
  319. static void hash_bucket_add(struct hash_bucket *bucket,
  320. struct dma_debug_entry *entry)
  321. {
  322. list_add_tail(&entry->list, &bucket->list);
  323. }
  324. /*
  325. * Remove entry from a hash bucket list
  326. */
  327. static void hash_bucket_del(struct dma_debug_entry *entry)
  328. {
  329. list_del(&entry->list);
  330. }
  331. static unsigned long long phys_addr(struct dma_debug_entry *entry)
  332. {
  333. if (entry->type == dma_debug_resource)
  334. return __pfn_to_phys(entry->pfn) + entry->offset;
  335. return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
  336. }
  337. /*
  338. * Dump mapping entries for debugging purposes
  339. */
  340. void debug_dma_dump_mappings(struct device *dev)
  341. {
  342. int idx;
  343. for (idx = 0; idx < HASH_SIZE; idx++) {
  344. struct hash_bucket *bucket = &dma_entry_hash[idx];
  345. struct dma_debug_entry *entry;
  346. unsigned long flags;
  347. spin_lock_irqsave(&bucket->lock, flags);
  348. list_for_each_entry(entry, &bucket->list, list) {
  349. if (!dev || dev == entry->dev) {
  350. dev_info(entry->dev,
  351. "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
  352. type2name[entry->type], idx,
  353. phys_addr(entry), entry->pfn,
  354. entry->dev_addr, entry->size,
  355. dir2name[entry->direction],
  356. maperr2str[entry->map_err_type]);
  357. }
  358. }
  359. spin_unlock_irqrestore(&bucket->lock, flags);
  360. cond_resched();
  361. }
  362. }
  363. /*
  364. * For each mapping (initial cacheline in the case of
  365. * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
  366. * scatterlist, or the cacheline specified in dma_map_single) insert
  367. * into this tree using the cacheline as the key. At
  368. * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
  369. * the entry already exists at insertion time add a tag as a reference
  370. * count for the overlapping mappings. For now, the overlap tracking
  371. * just ensures that 'unmaps' balance 'maps' before marking the
  372. * cacheline idle, but we should also be flagging overlaps as an API
  373. * violation.
  374. *
  375. * Memory usage is mostly constrained by the maximum number of available
  376. * dma-debug entries in that we need a free dma_debug_entry before
  377. * inserting into the tree. In the case of dma_map_page and
  378. * dma_alloc_coherent there is only one dma_debug_entry and one
  379. * dma_active_cacheline entry to track per event. dma_map_sg(), on the
  380. * other hand, consumes a single dma_debug_entry, but inserts 'nents'
  381. * entries into the tree.
  382. */
  383. static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
  384. static DEFINE_SPINLOCK(radix_lock);
  385. #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
  386. #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
  387. #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
  388. static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
  389. {
  390. return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
  391. (entry->offset >> L1_CACHE_SHIFT);
  392. }
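/*
 * For example, assuming 4 KiB pages (PAGE_SHIFT == 12) and 64-byte cache
 * lines (L1_CACHE_SHIFT == 6), CACHELINE_PER_PAGE_SHIFT is 6, so a mapping
 * with pfn == 0x1000 and offset == 0x100 is tracked as cacheline number
 * (0x1000 << 6) + (0x100 >> 6) == 0x40004.
 */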
  393. static int active_cacheline_read_overlap(phys_addr_t cln)
  394. {
  395. int overlap = 0, i;
  396. for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
  397. if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
  398. overlap |= 1 << i;
  399. return overlap;
  400. }
  401. static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
  402. {
  403. int i;
  404. if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
  405. return overlap;
  406. for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
  407. if (overlap & 1 << i)
  408. radix_tree_tag_set(&dma_active_cacheline, cln, i);
  409. else
  410. radix_tree_tag_clear(&dma_active_cacheline, cln, i);
  411. return overlap;
  412. }
  413. static void active_cacheline_inc_overlap(phys_addr_t cln)
  414. {
  415. int overlap = active_cacheline_read_overlap(cln);
  416. overlap = active_cacheline_set_overlap(cln, ++overlap);
  417. /* If we overflowed the overlap counter then we're potentially
  418. * leaking dma-mappings.
  419. */
  420. WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
  421. pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
  422. ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
  423. }
  424. static int active_cacheline_dec_overlap(phys_addr_t cln)
  425. {
  426. int overlap = active_cacheline_read_overlap(cln);
  427. return active_cacheline_set_overlap(cln, --overlap);
  428. }
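/*
 * The overlap count itself is kept in the radix tree's per-slot tags: with
 * the usual RADIX_TREE_MAX_TAGS of 3, each tag holds one bit of the counter,
 * so at most ACTIVE_CACHELINE_MAX_OVERLAP == 7 simultaneous mappings of one
 * cacheline can be distinguished before the WARN_ONCE() in
 * active_cacheline_inc_overlap() fires and further increments are no longer
 * recorded.
 */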
  429. static int active_cacheline_insert(struct dma_debug_entry *entry)
  430. {
  431. phys_addr_t cln = to_cacheline_number(entry);
  432. unsigned long flags;
  433. int rc;
  434. /* If the device is not writing memory then we don't have any
  435. * concerns about the cpu consuming stale data. This mitigates
  436. * legitimate usages of overlapping mappings.
  437. */
  438. if (entry->direction == DMA_TO_DEVICE)
  439. return 0;
  440. spin_lock_irqsave(&radix_lock, flags);
  441. rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
  442. if (rc == -EEXIST)
  443. active_cacheline_inc_overlap(cln);
  444. spin_unlock_irqrestore(&radix_lock, flags);
  445. return rc;
  446. }
  447. static void active_cacheline_remove(struct dma_debug_entry *entry)
  448. {
  449. phys_addr_t cln = to_cacheline_number(entry);
  450. unsigned long flags;
  451. /* ...mirror the insert case */
  452. if (entry->direction == DMA_TO_DEVICE)
  453. return;
  454. spin_lock_irqsave(&radix_lock, flags);
  455. /* since we are counting overlaps the final put of the
  456. * cacheline will occur when the overlap count is 0.
  457. * active_cacheline_dec_overlap() returns -1 in that case
  458. */
  459. if (active_cacheline_dec_overlap(cln) < 0)
  460. radix_tree_delete(&dma_active_cacheline, cln);
  461. spin_unlock_irqrestore(&radix_lock, flags);
  462. }
  463. /*
  464. * Wrapper function for adding an entry to the hash.
  465. * This function takes care of locking itself.
  466. */
  467. static void add_dma_entry(struct dma_debug_entry *entry)
  468. {
  469. struct hash_bucket *bucket;
  470. unsigned long flags;
  471. int rc;
  472. bucket = get_hash_bucket(entry, &flags);
  473. hash_bucket_add(bucket, entry);
  474. put_hash_bucket(bucket, flags);
  475. rc = active_cacheline_insert(entry);
  476. if (rc == -ENOMEM) {
  477. pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
  478. global_disable = true;
  479. }
  480. /* TODO: report -EEXIST errors here as overlapping mappings are
  481. * not supported by the DMA API
  482. */
  483. }
  484. static int dma_debug_create_entries(gfp_t gfp)
  485. {
  486. struct dma_debug_entry *entry;
  487. int i;
  488. entry = (void *)get_zeroed_page(gfp);
  489. if (!entry)
  490. return -ENOMEM;
  491. for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
  492. list_add_tail(&entry[i].list, &free_entries);
  493. num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
  494. nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
  495. return 0;
  496. }
  497. static struct dma_debug_entry *__dma_entry_alloc(void)
  498. {
  499. struct dma_debug_entry *entry;
  500. entry = list_entry(free_entries.next, struct dma_debug_entry, list);
  501. list_del(&entry->list);
  502. memset(entry, 0, sizeof(*entry));
  503. num_free_entries -= 1;
  504. if (num_free_entries < min_free_entries)
  505. min_free_entries = num_free_entries;
  506. return entry;
  507. }
  508. static void __dma_entry_alloc_check_leak(void)
  509. {
  510. u32 tmp = nr_total_entries % nr_prealloc_entries;
  511. /* Shout each time we tick over some multiple of the initial pool */
  512. if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
  513. pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
  514. nr_total_entries,
  515. (nr_total_entries / nr_prealloc_entries));
  516. }
  517. }
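/*
 * With the default pool of PREALLOC_DMA_DEBUG_ENTRIES (65536) entries, for
 * example, once dynamic allocation has roughly doubled the pool to 131072
 * entries this prints "dma_debug_entry pool grown to 131072 (200%)"; the
 * "tmp < DMA_DEBUG_DYNAMIC_ENTRIES" test limits the message to one per
 * multiple of the initial pool size rather than one per added page.
 */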
  518. /* struct dma_entry allocator
  519. *
  520. * The next two functions implement the allocator for
  521. * struct dma_debug_entries.
  522. */
  523. static struct dma_debug_entry *dma_entry_alloc(void)
  524. {
  525. struct dma_debug_entry *entry;
  526. unsigned long flags;
  527. spin_lock_irqsave(&free_entries_lock, flags);
  528. if (num_free_entries == 0) {
  529. if (dma_debug_create_entries(GFP_ATOMIC)) {
  530. global_disable = true;
  531. spin_unlock_irqrestore(&free_entries_lock, flags);
  532. pr_err("debugging out of memory - disabling\n");
  533. return NULL;
  534. }
  535. __dma_entry_alloc_check_leak();
  536. }
  537. entry = __dma_entry_alloc();
  538. spin_unlock_irqrestore(&free_entries_lock, flags);
  539. #ifdef CONFIG_STACKTRACE
  540. entry->stack_len = stack_trace_save(entry->stack_entries,
  541. ARRAY_SIZE(entry->stack_entries),
  542. 1);
  543. #endif
  544. return entry;
  545. }
  546. static void dma_entry_free(struct dma_debug_entry *entry)
  547. {
  548. unsigned long flags;
  549. active_cacheline_remove(entry);
  550. /*
  551. * add to beginning of the list - this way the entries are
  552. * more likely cache hot when they are reallocated.
  553. */
  554. spin_lock_irqsave(&free_entries_lock, flags);
  555. list_add(&entry->list, &free_entries);
  556. num_free_entries += 1;
  557. spin_unlock_irqrestore(&free_entries_lock, flags);
  558. }
  559. /*
  560. * DMA-API debugging init code
  561. *
  562. * The init code does two things:
  563. * 1. Initialize core data structures
  564. * 2. Preallocate a given number of dma_debug_entry structs
  565. */
  566. static ssize_t filter_read(struct file *file, char __user *user_buf,
  567. size_t count, loff_t *ppos)
  568. {
  569. char buf[NAME_MAX_LEN + 1];
  570. unsigned long flags;
  571. int len;
  572. if (!current_driver_name[0])
  573. return 0;
  574. /*
  575. * We can't copy to userspace directly because current_driver_name can
  576. * only be read under the driver_name_lock with irqs disabled. So
  577. * create a temporary copy first.
  578. */
  579. read_lock_irqsave(&driver_name_lock, flags);
  580. len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
  581. read_unlock_irqrestore(&driver_name_lock, flags);
  582. return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  583. }
  584. static ssize_t filter_write(struct file *file, const char __user *userbuf,
  585. size_t count, loff_t *ppos)
  586. {
  587. char buf[NAME_MAX_LEN];
  588. unsigned long flags;
  589. size_t len;
  590. int i;
  591. /*
  592. * We can't copy from userspace directly. Access to
  593. * current_driver_name is protected with a write_lock with irqs
  594. * disabled. Since copy_from_user can fault and may sleep we
  595. * need to copy to temporary buffer first
  596. */
  597. len = min(count, (size_t)(NAME_MAX_LEN - 1));
  598. if (copy_from_user(buf, userbuf, len))
  599. return -EFAULT;
  600. buf[len] = 0;
  601. write_lock_irqsave(&driver_name_lock, flags);
  602. /*
  603. * Now handle the string we got from userspace very carefully.
  604. * The rules are:
  605. * - only use the first token we got
  606. * - token delimiter is everything looking like a space
  607. * character (' ', '\n', '\t' ...)
  608. *
  609. */
  610. if (!isalnum(buf[0])) {
  611. /*
  612. * If the first character userspace gave us is not
  613. * alphanumerical then assume the filter should be
  614. * switched off.
  615. */
  616. if (current_driver_name[0])
  617. pr_info("switching off dma-debug driver filter\n");
  618. current_driver_name[0] = 0;
  619. current_driver = NULL;
  620. goto out_unlock;
  621. }
  622. /*
  623. * Now parse out the first token and use it as the name for the
  624. * driver to filter for.
  625. */
  626. for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
  627. current_driver_name[i] = buf[i];
  628. if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
  629. break;
  630. }
  631. current_driver_name[i] = 0;
  632. current_driver = NULL;
  633. pr_info("enable driver filter for driver [%s]\n",
  634. current_driver_name);
  635. out_unlock:
  636. write_unlock_irqrestore(&driver_name_lock, flags);
  637. return count;
  638. }
  639. static const struct file_operations filter_fops = {
  640. .read = filter_read,
  641. .write = filter_write,
  642. .llseek = default_llseek,
  643. };
  644. static int dump_show(struct seq_file *seq, void *v)
  645. {
  646. int idx;
  647. for (idx = 0; idx < HASH_SIZE; idx++) {
  648. struct hash_bucket *bucket = &dma_entry_hash[idx];
  649. struct dma_debug_entry *entry;
  650. unsigned long flags;
  651. spin_lock_irqsave(&bucket->lock, flags);
  652. list_for_each_entry(entry, &bucket->list, list) {
  653. seq_printf(seq,
  654. "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
  655. dev_name(entry->dev),
  656. dev_driver_string(entry->dev),
  657. type2name[entry->type], idx,
  658. phys_addr(entry), entry->pfn,
  659. entry->dev_addr, entry->size,
  660. dir2name[entry->direction],
  661. maperr2str[entry->map_err_type]);
  662. }
  663. spin_unlock_irqrestore(&bucket->lock, flags);
  664. }
  665. return 0;
  666. }
  667. DEFINE_SHOW_ATTRIBUTE(dump);
  668. static int __init dma_debug_fs_init(void)
  669. {
  670. struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
  671. debugfs_create_bool("disabled", 0444, dentry, &global_disable);
  672. debugfs_create_u32("error_count", 0444, dentry, &error_count);
  673. debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
  674. debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
  675. debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
  676. debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
  677. debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
  678. debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
  679. debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
  680. return 0;
  681. }
  682. core_initcall_sync(dma_debug_fs_init);
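/*
 * The files created above appear under the debugfs mount point, e.g. with
 * debugfs mounted at the conventional /sys/kernel/debug:
 *
 * cat /sys/kernel/debug/dma-api/error_count
 * cat /sys/kernel/debug/dma-api/dump
 * echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *
 * The driver name is only an example; writing a string that does not start
 * with an alphanumeric character switches the filter off again (see
 * filter_write() above).
 */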
  683. static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
  684. {
  685. struct dma_debug_entry *entry;
  686. unsigned long flags;
  687. int count = 0, i;
  688. for (i = 0; i < HASH_SIZE; ++i) {
  689. spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
  690. list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
  691. if (entry->dev == dev) {
  692. count += 1;
  693. *out_entry = entry;
  694. }
  695. }
  696. spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
  697. }
  698. return count;
  699. }
  700. static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
  701. {
  702. struct device *dev = data;
  703. struct dma_debug_entry *entry;
  704. int count;
  705. if (dma_debug_disabled())
  706. return 0;
  707. switch (action) {
  708. case BUS_NOTIFY_UNBOUND_DRIVER:
  709. count = device_dma_allocations(dev, &entry);
  710. if (count == 0)
  711. break;
  712. err_printk(dev, entry, "device driver has pending "
  713. "DMA allocations while released from device "
  714. "[count=%d]\n"
  715. "One of leaked entries details: "
  716. "[device address=0x%016llx] [size=%llu bytes] "
  717. "[mapped with %s] [mapped as %s]\n",
  718. count, entry->dev_addr, entry->size,
  719. dir2name[entry->direction], type2name[entry->type]);
  720. break;
  721. default:
  722. break;
  723. }
  724. return 0;
  725. }
  726. void dma_debug_add_bus(struct bus_type *bus)
  727. {
  728. struct notifier_block *nb;
  729. if (dma_debug_disabled())
  730. return;
  731. nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
  732. if (nb == NULL) {
  733. pr_err("dma_debug_add_bus: out of memory\n");
  734. return;
  735. }
  736. nb->notifier_call = dma_debug_device_change;
  737. bus_register_notifier(bus, nb);
  738. }
  739. static int dma_debug_init(void)
  740. {
  741. int i, nr_pages;
  742. /* Do not use dma_debug_initialized here, since we really want to be
  743. * called to set dma_debug_initialized
  744. */
  745. if (global_disable)
  746. return 0;
  747. for (i = 0; i < HASH_SIZE; ++i) {
  748. INIT_LIST_HEAD(&dma_entry_hash[i].list);
  749. spin_lock_init(&dma_entry_hash[i].lock);
  750. }
  751. nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
  752. for (i = 0; i < nr_pages; ++i)
  753. dma_debug_create_entries(GFP_KERNEL);
  754. if (num_free_entries >= nr_prealloc_entries) {
  755. pr_info("preallocated %d debug entries\n", nr_total_entries);
  756. } else if (num_free_entries > 0) {
  757. pr_warn("%d debug entries requested but only %d allocated\n",
  758. nr_prealloc_entries, nr_total_entries);
  759. } else {
  760. pr_err("debugging out of memory error - disabled\n");
  761. global_disable = true;
  762. return 0;
  763. }
  764. min_free_entries = num_free_entries;
  765. dma_debug_initialized = true;
  766. pr_info("debugging enabled by kernel config\n");
  767. return 0;
  768. }
  769. core_initcall(dma_debug_init);
  770. static __init int dma_debug_cmdline(char *str)
  771. {
  772. if (!str)
  773. return -EINVAL;
  774. if (strncmp(str, "off", 3) == 0) {
  775. pr_info("debugging disabled on kernel command line\n");
  776. global_disable = true;
  777. }
  778. return 1;
  779. }
  780. static __init int dma_debug_entries_cmdline(char *str)
  781. {
  782. if (!str)
  783. return -EINVAL;
  784. if (!get_option(&str, &nr_prealloc_entries))
  785. nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
  786. return 1;
  787. }
  788. __setup("dma_debug=", dma_debug_cmdline);
  789. __setup("dma_debug_entries=", dma_debug_entries_cmdline);
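/*
 * Example command-line usage of the two __setup() hooks above:
 *
 * dma_debug=off disables the facility entirely,
 * dma_debug_entries=131072 overrides PREALLOC_DMA_DEBUG_ENTRIES for the
 * initial pool (131072 is just an example value).
 */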
  790. static void check_unmap(struct dma_debug_entry *ref)
  791. {
  792. struct dma_debug_entry *entry;
  793. struct hash_bucket *bucket;
  794. unsigned long flags;
  795. bucket = get_hash_bucket(ref, &flags);
  796. entry = bucket_find_exact(bucket, ref);
  797. if (!entry) {
  798. /* must drop lock before calling dma_mapping_error */
  799. put_hash_bucket(bucket, flags);
  800. if (dma_mapping_error(ref->dev, ref->dev_addr)) {
  801. err_printk(ref->dev, NULL,
  802. "device driver tries to free an "
  803. "invalid DMA memory address\n");
  804. } else {
  805. err_printk(ref->dev, NULL,
  806. "device driver tries to free DMA "
  807. "memory it has not allocated [device "
  808. "address=0x%016llx] [size=%llu bytes]\n",
  809. ref->dev_addr, ref->size);
  810. }
  811. return;
  812. }
  813. if (ref->size != entry->size) {
  814. err_printk(ref->dev, entry, "device driver frees "
  815. "DMA memory with different size "
  816. "[device address=0x%016llx] [map size=%llu bytes] "
  817. "[unmap size=%llu bytes]\n",
  818. ref->dev_addr, entry->size, ref->size);
  819. }
  820. if (ref->type != entry->type) {
  821. err_printk(ref->dev, entry, "device driver frees "
  822. "DMA memory with wrong function "
  823. "[device address=0x%016llx] [size=%llu bytes] "
  824. "[mapped as %s] [unmapped as %s]\n",
  825. ref->dev_addr, ref->size,
  826. type2name[entry->type], type2name[ref->type]);
  827. } else if ((entry->type == dma_debug_coherent) &&
  828. (phys_addr(ref) != phys_addr(entry))) {
  829. err_printk(ref->dev, entry, "device driver frees "
  830. "DMA memory with different CPU address "
  831. "[device address=0x%016llx] [size=%llu bytes] "
  832. "[cpu alloc address=0x%016llx] "
  833. "[cpu free address=0x%016llx]",
  834. ref->dev_addr, ref->size,
  835. phys_addr(entry),
  836. phys_addr(ref));
  837. }
  838. if (ref->sg_call_ents && ref->type == dma_debug_sg &&
  839. ref->sg_call_ents != entry->sg_call_ents) {
  840. err_printk(ref->dev, entry, "device driver frees "
  841. "DMA sg list with different entry count "
  842. "[map count=%d] [unmap count=%d]\n",
  843. entry->sg_call_ents, ref->sg_call_ents);
  844. }
  845. /*
  846. * This may not be a bug in reality - but most implementations of the
  847. * DMA API don't handle this properly, so check for it here
  848. */
  849. if (ref->direction != entry->direction) {
  850. err_printk(ref->dev, entry, "device driver frees "
  851. "DMA memory with different direction "
  852. "[device address=0x%016llx] [size=%llu bytes] "
  853. "[mapped with %s] [unmapped with %s]\n",
  854. ref->dev_addr, ref->size,
  855. dir2name[entry->direction],
  856. dir2name[ref->direction]);
  857. }
  858. /*
  859. * Drivers should use dma_mapping_error() to check the returned
  860. * addresses of dma_map_single() and dma_map_page().
  861. * If not, print this warning message. See Documentation/core-api/dma-api.rst.
  862. */
  863. if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
  864. err_printk(ref->dev, entry,
  865. "device driver failed to check map error "
  866. "[device address=0x%016llx] [size=%llu bytes] "
  867. "[mapped as %s]",
  868. ref->dev_addr, ref->size,
  869. type2name[entry->type]);
  870. }
  871. hash_bucket_del(entry);
  872. dma_entry_free(entry);
  873. put_hash_bucket(bucket, flags);
  874. }
  875. static void check_for_stack(struct device *dev,
  876. struct page *page, size_t offset)
  877. {
  878. void *addr;
  879. struct vm_struct *stack_vm_area = task_stack_vm_area(current);
  880. if (!stack_vm_area) {
  881. /* Stack is direct-mapped. */
  882. if (PageHighMem(page))
  883. return;
  884. addr = page_address(page) + offset;
  885. if (object_is_on_stack(addr))
  886. err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
  887. } else {
  888. /* Stack is vmalloced. */
  889. int i;
  890. for (i = 0; i < stack_vm_area->nr_pages; i++) {
  891. if (page != stack_vm_area->pages[i])
  892. continue;
  893. addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
  894. err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
  895. break;
  896. }
  897. }
  898. }
  899. static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
  900. {
  901. unsigned long a1 = (unsigned long)addr;
  902. unsigned long b1 = a1 + len;
  903. unsigned long a2 = (unsigned long)start;
  904. unsigned long b2 = (unsigned long)end;
  905. return !(b1 <= a2 || a1 >= b2);
  906. }
  907. static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
  908. {
  909. if (overlap(addr, len, _stext, _etext) ||
  910. overlap(addr, len, __start_rodata, __end_rodata))
  911. err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
  912. }
  913. static void check_sync(struct device *dev,
  914. struct dma_debug_entry *ref,
  915. bool to_cpu)
  916. {
  917. struct dma_debug_entry *entry;
  918. struct hash_bucket *bucket;
  919. unsigned long flags;
  920. bucket = get_hash_bucket(ref, &flags);
  921. entry = bucket_find_contain(&bucket, ref, &flags);
  922. if (!entry) {
  923. err_printk(dev, NULL, "device driver tries "
  924. "to sync DMA memory it has not allocated "
  925. "[device address=0x%016llx] [size=%llu bytes]\n",
  926. (unsigned long long)ref->dev_addr, ref->size);
  927. goto out;
  928. }
  929. if (ref->size > entry->size) {
  930. err_printk(dev, entry, "device driver syncs"
  931. " DMA memory outside allocated range "
  932. "[device address=0x%016llx] "
  933. "[allocation size=%llu bytes] "
  934. "[sync offset+size=%llu]\n",
  935. entry->dev_addr, entry->size,
  936. ref->size);
  937. }
  938. if (entry->direction == DMA_BIDIRECTIONAL)
  939. goto out;
  940. if (ref->direction != entry->direction) {
  941. err_printk(dev, entry, "device driver syncs "
  942. "DMA memory with different direction "
  943. "[device address=0x%016llx] [size=%llu bytes] "
  944. "[mapped with %s] [synced with %s]\n",
  945. (unsigned long long)ref->dev_addr, entry->size,
  946. dir2name[entry->direction],
  947. dir2name[ref->direction]);
  948. }
  949. if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
  950. !(ref->direction == DMA_TO_DEVICE))
  951. err_printk(dev, entry, "device driver syncs "
  952. "device read-only DMA memory for cpu "
  953. "[device address=0x%016llx] [size=%llu bytes] "
  954. "[mapped with %s] [synced with %s]\n",
  955. (unsigned long long)ref->dev_addr, entry->size,
  956. dir2name[entry->direction],
  957. dir2name[ref->direction]);
  958. if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
  959. !(ref->direction == DMA_FROM_DEVICE))
  960. err_printk(dev, entry, "device driver syncs "
  961. "device write-only DMA memory to device "
  962. "[device address=0x%016llx] [size=%llu bytes] "
  963. "[mapped with %s] [synced with %s]\n",
  964. (unsigned long long)ref->dev_addr, entry->size,
  965. dir2name[entry->direction],
  966. dir2name[ref->direction]);
  967. if (ref->sg_call_ents && ref->type == dma_debug_sg &&
  968. ref->sg_call_ents != entry->sg_call_ents) {
  969. err_printk(ref->dev, entry, "device driver syncs "
  970. "DMA sg list with different entry count "
  971. "[map count=%d] [sync count=%d]\n",
  972. entry->sg_call_ents, ref->sg_call_ents);
  973. }
  974. out:
  975. put_hash_bucket(bucket, flags);
  976. }
  977. static void check_sg_segment(struct device *dev, struct scatterlist *sg)
  978. {
  979. #ifdef CONFIG_DMA_API_DEBUG_SG
  980. unsigned int max_seg = dma_get_max_seg_size(dev);
  981. u64 start, end, boundary = dma_get_seg_boundary(dev);
  982. /*
  983. * Either the driver forgot to set dma_parms appropriately, or
  984. * whoever generated the list forgot to check them.
  985. */
  986. if (sg->length > max_seg)
  987. err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
  988. sg->length, max_seg);
  989. /*
  990. * In some cases this could potentially be the DMA API
  991. * implementation's fault, but it would usually imply that
  992. * the scatterlist was built inappropriately to begin with.
  993. */
  994. start = sg_dma_address(sg);
  995. end = start + sg_dma_len(sg) - 1;
  996. if ((start ^ end) & ~boundary)
  997. err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
  998. start, end, boundary);
  999. #endif
  1000. }
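/*
 * Worked example for the boundary test above: with a 64 KiB segment
 * boundary mask (dma_get_seg_boundary() == 0xffff), a segment starting at
 * 0x1fc00 with dma length 0x800 ends at 0x203ff, so
 * (0x1fc00 ^ 0x203ff) & ~0xffff == 0x30000 != 0
 * and the segment is reported as crossing the boundary.
 */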
  1001. void debug_dma_map_single(struct device *dev, const void *addr,
  1002. unsigned long len)
  1003. {
  1004. if (unlikely(dma_debug_disabled()))
  1005. return;
  1006. if (!virt_addr_valid(addr))
  1007. err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
  1008. addr, len);
  1009. if (is_vmalloc_addr(addr))
  1010. err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
  1011. addr, len);
  1012. }
  1013. EXPORT_SYMBOL(debug_dma_map_single);
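/*
 * The checks in this file assume the usual driver-side streaming-DMA
 * pattern, sketched here with placeholder buf/len values:
 *
 * dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 * if (dma_mapping_error(dev, handle)) // marks the entry MAP_ERR_CHECKED
 * return -ENOMEM;
 * ...
 * dma_unmap_single(dev, handle, len, DMA_TO_DEVICE); // same size/direction
 *
 * Mapping stack or vmalloc memory, skipping the dma_mapping_error() check,
 * or unmapping with a mismatched size, type or direction is what the
 * debug_dma_* hooks in this file report via err_printk().
 */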
  1014. void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
  1015. size_t size, int direction, dma_addr_t dma_addr)
  1016. {
  1017. struct dma_debug_entry *entry;
  1018. if (unlikely(dma_debug_disabled()))
  1019. return;
  1020. if (dma_mapping_error(dev, dma_addr))
  1021. return;
  1022. entry = dma_entry_alloc();
  1023. if (!entry)
  1024. return;
  1025. entry->dev = dev;
  1026. entry->type = dma_debug_single;
  1027. entry->pfn = page_to_pfn(page);
  1028. entry->offset = offset;
  1029. entry->dev_addr = dma_addr;
  1030. entry->size = size;
  1031. entry->direction = direction;
  1032. entry->map_err_type = MAP_ERR_NOT_CHECKED;
  1033. check_for_stack(dev, page, offset);
  1034. if (!PageHighMem(page)) {
  1035. void *addr = page_address(page) + offset;
  1036. check_for_illegal_area(dev, addr, size);
  1037. }
  1038. add_dma_entry(entry);
  1039. }
  1040. void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  1041. {
  1042. struct dma_debug_entry ref;
  1043. struct dma_debug_entry *entry;
  1044. struct hash_bucket *bucket;
  1045. unsigned long flags;
  1046. if (unlikely(dma_debug_disabled()))
  1047. return;
  1048. ref.dev = dev;
  1049. ref.dev_addr = dma_addr;
  1050. bucket = get_hash_bucket(&ref, &flags);
  1051. list_for_each_entry(entry, &bucket->list, list) {
  1052. if (!exact_match(&ref, entry))
  1053. continue;
  1054. /*
  1055. * The same physical address can be mapped multiple
  1056. * times. Without a hardware IOMMU this results in the
  1057. * same device addresses being put into the dma-debug
  1058. * hash multiple times too. This can result in false
  1059. * positives being reported. Therefore we simply mark
  1060. * the first entry
  1061. * from the hash which fits the reference value and is
  1062. * not currently listed as being checked.
  1063. */
  1064. if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
  1065. entry->map_err_type = MAP_ERR_CHECKED;
  1066. break;
  1067. }
  1068. }
  1069. put_hash_bucket(bucket, flags);
  1070. }
  1071. EXPORT_SYMBOL(debug_dma_mapping_error);
  1072. void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
  1073. size_t size, int direction)
  1074. {
  1075. struct dma_debug_entry ref = {
  1076. .type = dma_debug_single,
  1077. .dev = dev,
  1078. .dev_addr = addr,
  1079. .size = size,
  1080. .direction = direction,
  1081. };
  1082. if (unlikely(dma_debug_disabled()))
  1083. return;
  1084. check_unmap(&ref);
  1085. }
  1086. void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
  1087. int nents, int mapped_ents, int direction)
  1088. {
  1089. struct dma_debug_entry *entry;
  1090. struct scatterlist *s;
  1091. int i;
  1092. if (unlikely(dma_debug_disabled()))
  1093. return;
  1094. for_each_sg(sg, s, nents, i) {
  1095. check_for_stack(dev, sg_page(s), s->offset);
  1096. if (!PageHighMem(sg_page(s)))
  1097. check_for_illegal_area(dev, sg_virt(s), s->length);
  1098. }
  1099. for_each_sg(sg, s, mapped_ents, i) {
  1100. entry = dma_entry_alloc();
  1101. if (!entry)
  1102. return;
  1103. entry->type = dma_debug_sg;
  1104. entry->dev = dev;
  1105. entry->pfn = page_to_pfn(sg_page(s));
  1106. entry->offset = s->offset;
  1107. entry->size = sg_dma_len(s);
  1108. entry->dev_addr = sg_dma_address(s);
  1109. entry->direction = direction;
  1110. entry->sg_call_ents = nents;
  1111. entry->sg_mapped_ents = mapped_ents;
  1112. check_sg_segment(dev, s);
  1113. add_dma_entry(entry);
  1114. }
  1115. }
  1116. static int get_nr_mapped_entries(struct device *dev,
  1117. struct dma_debug_entry *ref)
  1118. {
  1119. struct dma_debug_entry *entry;
  1120. struct hash_bucket *bucket;
  1121. unsigned long flags;
  1122. int mapped_ents;
  1123. bucket = get_hash_bucket(ref, &flags);
  1124. entry = bucket_find_exact(bucket, ref);
  1125. mapped_ents = 0;
  1126. if (entry)
  1127. mapped_ents = entry->sg_mapped_ents;
  1128. put_hash_bucket(bucket, flags);
  1129. return mapped_ents;
  1130. }
  1131. void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
  1132. int nelems, int dir)
  1133. {
  1134. struct scatterlist *s;
  1135. int mapped_ents = 0, i;
  1136. if (unlikely(dma_debug_disabled()))
  1137. return;
  1138. for_each_sg(sglist, s, nelems, i) {
  1139. struct dma_debug_entry ref = {
  1140. .type = dma_debug_sg,
  1141. .dev = dev,
  1142. .pfn = page_to_pfn(sg_page(s)),
  1143. .offset = s->offset,
  1144. .dev_addr = sg_dma_address(s),
  1145. .size = sg_dma_len(s),
  1146. .direction = dir,
  1147. .sg_call_ents = nelems,
  1148. };
  1149. if (mapped_ents && i >= mapped_ents)
  1150. break;
  1151. if (!i)
  1152. mapped_ents = get_nr_mapped_entries(dev, &ref);
  1153. check_unmap(&ref);
  1154. }
  1155. }
  1156. void debug_dma_alloc_coherent(struct device *dev, size_t size,
  1157. dma_addr_t dma_addr, void *virt)
  1158. {
  1159. struct dma_debug_entry *entry;
  1160. if (unlikely(dma_debug_disabled()))
  1161. return;
  1162. if (unlikely(virt == NULL))
  1163. return;
  1164. /* handle vmalloc and linear addresses */
  1165. if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
  1166. return;
  1167. entry = dma_entry_alloc();
  1168. if (!entry)
  1169. return;
  1170. entry->type = dma_debug_coherent;
  1171. entry->dev = dev;
  1172. entry->offset = offset_in_page(virt);
  1173. entry->size = size;
  1174. entry->dev_addr = dma_addr;
  1175. entry->direction = DMA_BIDIRECTIONAL;
  1176. if (is_vmalloc_addr(virt))
  1177. entry->pfn = vmalloc_to_pfn(virt);
  1178. else
  1179. entry->pfn = page_to_pfn(virt_to_page(virt));
  1180. add_dma_entry(entry);
  1181. }
  1182. void debug_dma_free_coherent(struct device *dev, size_t size,
  1183. void *virt, dma_addr_t addr)
  1184. {
  1185. struct dma_debug_entry ref = {
  1186. .type = dma_debug_coherent,
  1187. .dev = dev,
  1188. .offset = offset_in_page(virt),
  1189. .dev_addr = addr,
  1190. .size = size,
  1191. .direction = DMA_BIDIRECTIONAL,
  1192. };
  1193. /* handle vmalloc and linear addresses */
  1194. if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
  1195. return;
  1196. if (is_vmalloc_addr(virt))
  1197. ref.pfn = vmalloc_to_pfn(virt);
  1198. else
  1199. ref.pfn = page_to_pfn(virt_to_page(virt));
  1200. if (unlikely(dma_debug_disabled()))
  1201. return;
  1202. check_unmap(&ref);
  1203. }
  1204. void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
  1205. int direction, dma_addr_t dma_addr)
  1206. {
  1207. struct dma_debug_entry *entry;
  1208. if (unlikely(dma_debug_disabled()))
  1209. return;
  1210. entry = dma_entry_alloc();
  1211. if (!entry)
  1212. return;
  1213. entry->type = dma_debug_resource;
  1214. entry->dev = dev;
  1215. entry->pfn = PHYS_PFN(addr);
  1216. entry->offset = offset_in_page(addr);
  1217. entry->size = size;
  1218. entry->dev_addr = dma_addr;
  1219. entry->direction = direction;
  1220. entry->map_err_type = MAP_ERR_NOT_CHECKED;
  1221. add_dma_entry(entry);
  1222. }
  1223. void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
  1224. size_t size, int direction)
  1225. {
  1226. struct dma_debug_entry ref = {
  1227. .type = dma_debug_resource,
  1228. .dev = dev,
  1229. .dev_addr = dma_addr,
  1230. .size = size,
  1231. .direction = direction,
  1232. };
  1233. if (unlikely(dma_debug_disabled()))
  1234. return;
  1235. check_unmap(&ref);
  1236. }
  1237. void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
  1238. size_t size, int direction)
  1239. {
  1240. struct dma_debug_entry ref;
  1241. if (unlikely(dma_debug_disabled()))
  1242. return;
  1243. ref.type = dma_debug_single;
  1244. ref.dev = dev;
  1245. ref.dev_addr = dma_handle;
  1246. ref.size = size;
  1247. ref.direction = direction;
  1248. ref.sg_call_ents = 0;
  1249. check_sync(dev, &ref, true);
  1250. }
  1251. void debug_dma_sync_single_for_device(struct device *dev,
  1252. dma_addr_t dma_handle, size_t size,
  1253. int direction)
  1254. {
  1255. struct dma_debug_entry ref;
  1256. if (unlikely(dma_debug_disabled()))
  1257. return;
  1258. ref.type = dma_debug_single;
  1259. ref.dev = dev;
  1260. ref.dev_addr = dma_handle;
  1261. ref.size = size;
  1262. ref.direction = direction;
  1263. ref.sg_call_ents = 0;
  1264. check_sync(dev, &ref, false);
  1265. }
  1266. void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  1267. int nelems, int direction)
  1268. {
  1269. struct scatterlist *s;
  1270. int mapped_ents = 0, i;
  1271. if (unlikely(dma_debug_disabled()))
  1272. return;
  1273. for_each_sg(sg, s, nelems, i) {
  1274. struct dma_debug_entry ref = {
  1275. .type = dma_debug_sg,
  1276. .dev = dev,
  1277. .pfn = page_to_pfn(sg_page(s)),
  1278. .offset = s->offset,
  1279. .dev_addr = sg_dma_address(s),
  1280. .size = sg_dma_len(s),
  1281. .direction = direction,
  1282. .sg_call_ents = nelems,
  1283. };
  1284. if (!i)
  1285. mapped_ents = get_nr_mapped_entries(dev, &ref);
  1286. if (i >= mapped_ents)
  1287. break;
  1288. check_sync(dev, &ref, true);
  1289. }
  1290. }
  1291. void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  1292. int nelems, int direction)
  1293. {
  1294. struct scatterlist *s;
  1295. int mapped_ents = 0, i;
  1296. if (unlikely(dma_debug_disabled()))
  1297. return;
  1298. for_each_sg(sg, s, nelems, i) {
  1299. struct dma_debug_entry ref = {
  1300. .type = dma_debug_sg,
  1301. .dev = dev,
  1302. .pfn = page_to_pfn(sg_page(s)),
  1303. .offset = s->offset,
  1304. .dev_addr = sg_dma_address(s),
  1305. .size = sg_dma_len(s),
  1306. .direction = direction,
  1307. .sg_call_ents = nelems,
  1308. };
  1309. if (!i)
  1310. mapped_ents = get_nr_mapped_entries(dev, &ref);
  1311. if (i >= mapped_ents)
  1312. break;
  1313. check_sync(dev, &ref, false);
  1314. }
  1315. }
  1316. static int __init dma_debug_driver_setup(char *str)
  1317. {
  1318. int i;
  1319. for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
  1320. current_driver_name[i] = *str;
  1321. if (*str == 0)
  1322. break;
  1323. }
  1324. if (current_driver_name[0])
  1325. pr_info("enable driver filter for driver [%s]\n",
  1326. current_driver_name);
  1327. return 1;
  1328. }
  1329. __setup("dma_debug_driver=", dma_debug_driver_setup);
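/*
 * Example: booting with dma_debug_driver=e1000e (the driver name is only an
 * example) pre-loads the same per-driver filter that can otherwise be set at
 * run time through the debugfs driver_filter file above.
 */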