// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
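/*
 * Example (illustrative sketch only, not part of the original file): how a
 * driver's probe path might use the managed API above so the buffer is
 * released automatically on driver detach.  my_probe(), struct my_dev and
 * the ring fields are hypothetical names.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct my_dev *md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return -ENOMEM;
 *		md->ring = dmam_alloc_attrs(dev, SZ_4K, &md->ring_dma,
 *					    GFP_KERNEL, 0);
 *		if (!md->ring)
 *			return -ENOMEM;
 *		// no explicit free needed: dmam_release() runs on detach
 *		return 0;
 *	}
 */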

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);
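
/*
 * Example (illustrative sketch only, not part of the original file): mapping
 * a single page for a device-bound transfer and checking the result with
 * dma_mapping_error().  'page' and 'len' are assumed to come from the
 * caller; every successful map must be paired with an unmap once the DMA
 * has completed.
 *
 *	dma_addr_t dma = dma_map_page_attrs(dev, page, 0, len,
 *					    DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... program the hardware with 'dma' and wait for completion ...
 *	dma_unmap_page_attrs(dev, dma, len, DMA_TO_DEVICE, 0);
 */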

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
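
/*
 * Example (illustrative sketch only, not part of the original file): a zero
 * return means the mapping failed; a positive return is the number of mapped
 * segments, which may be smaller than 'nents' and is what the driver walks,
 * while the original 'nents' is what gets passed back to
 * dma_unmap_sg_attrs().  'sgl', 'sg', 'i', 'hw' and program_desc() are
 * hypothetical caller-side names.
 *
 *	int mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_desc(hw, sg_dma_address(sg), sg_dma_len(sg));
 *	// ... after the transfer completes ...
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 */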

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
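
/*
 * Example (illustrative sketch only, not part of the original file):
 * dma_map_resource() is for MMIO ranges rather than RAM (the pfn_valid()
 * check above rejects RAM), e.g. giving a DMA engine a device-visible
 * address for a peripheral's FIFO register.  'fifo_phys' and 'len' are
 * hypothetical.
 *
 *	dma_addr_t dma = dma_map_resource(dev, fifo_phys, len,
 *					  DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... hand 'dma' to the DMA engine as the destination address ...
 *	dma_unmap_resource(dev, dma, len, DMA_TO_DEVICE, 0);
 */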

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
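
/*
 * Example (illustrative sketch only, not part of the original file): for a
 * long-lived streaming mapping, ownership can be bounced between CPU and
 * device with the sync helpers above instead of unmapping and remapping.
 * 'dma' and 'len' refer to an existing streaming mapping created earlier by
 * the caller.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the CPU may now safely read the buffer contents ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the device may now DMA into the buffer again ...
 */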

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	if (attrs & DMA_ATTR_SYS_CACHE_ONLY ||
	    attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
		return pgprot_syscached(prot);
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
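
/*
 * Example (illustrative sketch only, not part of the original file):
 * exposing a coherent buffer to userspace from a driver's .mmap file
 * operation.  struct my_dev and its cpu_addr/dma_addr/size fields, filled
 * in earlier from dma_alloc_attrs(), are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *md = file->private_data;
 *
 *		if (!dma_can_mmap(md->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(md->dev, vma, md->cpu_addr, md->dma_addr,
 *				      md->size, 0);
 *	}
 */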

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
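
/*
 * Example (illustrative sketch only, not part of the original file):
 * coherent buffers are normally allocated in probe/open paths and freed in
 * the matching teardown path; as the WARN_ON(irqs_disabled()) above
 * indicates, dma_free_attrs() must not be called from interrupt context.
 * 'md', 'DESC_RING_BYTES' and the desc fields are hypothetical.
 *
 *	md->desc = dma_alloc_attrs(dev, DESC_RING_BYTES, &md->desc_dma,
 *				   GFP_KERNEL, 0);
 *	if (!md->desc)
 *		return -ENOMEM;
 *	// ... use the ring ...
 *	dma_free_attrs(dev, DESC_RING_BYTES, md->desc, md->desc_dma, 0);
 */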

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	else if (ops->alloc_pages)
		page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
	else
		return NULL;

	debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);

	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);

	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *vaddr;

	if (!ops || !ops->alloc_noncoherent) {
		struct page *page;

		page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	size = PAGE_ALIGN(size);
	vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
	if (vaddr)
		debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
				   *dma_handle);
	return vaddr;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->free_noncoherent) {
		dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
		return;
	}

	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncoherent);
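
/*
 * Example (illustrative sketch only, not part of the original file): memory
 * from dma_alloc_noncoherent() is not guaranteed to be cache-coherent, so
 * the CPU and the device exchange ownership with the dma_sync_single_*()
 * helpers.  'buf', 'dma' and 'size' are hypothetical caller-side names.
 *
 *	buf = dma_alloc_noncoherent(dev, size, &dma, DMA_FROM_DEVICE,
 *				    GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... the device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
 *	// ... the CPU reads the data ...
 *	dma_free_noncoherent(dev, size, buf, dma, DMA_FROM_DEVICE);
 */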

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
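
/*
 * Example (illustrative sketch only, not part of the original file): a
 * typical probe path tries the widest mask the hardware supports and falls
 * back to 32 bits.  dma_set_mask_and_coherent() is the helper in
 * <linux/dma-mapping.h> that sets both the streaming and the coherent mask
 * in one call; both calls return 0 on success.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
 *		dev_err(dev, "no usable DMA addressing mode\n");
 *		return -EIO;
 *	}
 */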

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);
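
/*
 * Example (illustrative sketch only, not part of the original file):
 * dma_need_sync() lets a hot path cache whether sync calls can be elided
 * for a given mapping, e.g. in a buffer-recycling pool.  'pool', 'dma' and
 * 'len' are hypothetical.
 *
 *	pool->dma_needs_sync = dma_need_sync(dev, dma);
 *	// ... later, before handing the buffer back to the device ...
 *	if (pool->dma_needs_sync)
 *		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */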

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);