dma-mapping.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has been already
 * transferred to 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)
/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
 * DMA_ATTR_OVERWRITE: This is a hint to the DMA-mapping subsystem that the
 * device is expected to overwrite the entire mapped size, thus the caller
 * does not require any of the previous buffer contents to be preserved.
 * This allows bounce-buffering implementations to optimise DMA_FROM_DEVICE
 * transfers.
 */
#define DMA_ATTR_OVERWRITE (1UL << 10)
/*
 * DMA_ATTR_SYS_CACHE_ONLY: used to indicate that the buffer should be mapped
 * with the correct memory attributes so that it can be cached in the system
 * or last level cache. This is useful for buffers that are being mapped for
 * devices that are non-coherent, but can use the system cache.
 */
#define DMA_ATTR_SYS_CACHE_ONLY (1UL << 14)
/*
 * DMA_ATTR_SYS_CACHE_ONLY_NWA: used to indicate that the buffer should be
 * mapped with the correct memory attributes so that it can be cached in the
 * system or last level cache, with a no write allocate cache policy. This is
 * useful for buffers that are being mapped for devices that are non-coherent,
 * but can use the system cache.
 */
#define DMA_ATTR_SYS_CACHE_ONLY_NWA (1UL << 15)
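
/*
 * Illustrative sketch (editor's example, not part of the upstream header):
 * attributes are combined into a bitmask and passed via the 'attrs' argument
 * of the *_attrs interfaces declared below. 'dev', 'size' and 'handle' are
 * hypothetical driver-local names.
 *
 *	dma_addr_t handle;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_attrs(dev, SZ_64K, &handle, GFP_KERNEL,
 *			DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, SZ_64K, vaddr, handle, DMA_ATTR_WRITE_COMBINE);
 *
 * This particular combination mirrors what the dma_alloc_wc() helper further
 * down in this header does when __GFP_NOWARN is set.
 */
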
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
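
/*
 * Illustrative sketch (editor's example, not part of the upstream header):
 * a typical streaming mapping of a driver-owned, kmalloc'ed buffer using the
 * helpers above, including the mandatory error check. 'dev', 'buf' and 'len'
 * are hypothetical driver-local names.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... device owns the buffer; start the transfer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU may inspect the data here ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... hand the buffer back to the device ...
 *	dma_unmap_single_attrs(dev, dma, len, DMA_FROM_DEVICE, 0);
 */
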
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or -EINVAL on error during mapping the buffer.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
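
/*
 * Illustrative sketch (editor's example, not part of the upstream header):
 * the full sg_table lifecycle using the four helpers above. 'dev' and 'sgt'
 * are hypothetical driver-local names; 'sgt' is assumed to be an already
 * populated sg_table.
 *
 *	if (dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0))
 *		return -EINVAL;
 *	... program the device with the sgt->nents mapped entries ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
 *	... CPU updates the buffer ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
 *	... device runs again ...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */
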
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
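
/*
 * Illustrative sketch (editor's example, not part of the upstream header):
 * a long-lived coherent allocation, e.g. a descriptor ring, which CPU and
 * device share without explicit sync calls. 'dev', 'ring' and 'ring_dma'
 * are hypothetical driver-local names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, use 'ring' from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */
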
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
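
/*
 * Illustrative sketch (editor's example, not part of the upstream header):
 * a common probe-time pattern that requests 64-bit addressing and falls back
 * to 32-bit if the platform rejects it. 'dev' is a hypothetical name.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
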
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			dma_get_required_mask(dev);
}
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}
/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME) (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
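
/*
 * Illustrative sketch (editor's example, not part of the upstream header):
 * the dma_unmap_* macros above let a driver store unmap state only when
 * CONFIG_NEED_DMA_MAP_STATE is set, and compile it away otherwise.
 * 'struct my_tx_buf', 'buf', 'mapping' and 'size' are hypothetical names.
 *
 *	struct my_tx_buf {
 *		void *data;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, mapping);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */
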
/*
 * Legacy interface to set up the dma offset map. Drivers really should not
 * actually use it, but we have a few legacy cases left.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

extern const struct dma_map_ops dma_virt_ops;

#endif /* _LINUX_DMA_MAPPING_H */