dma-map-ops.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/android_kabi.h>

struct cma;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	void *(*alloc_noncoherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);
	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
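
/*
 * Illustrative sketch (not part of this header): roughly how a minimal
 * dma_map_ops implementation might look for a hypothetical, cache-coherent
 * "foo" bus whose devices address physical memory directly.  All foo_*
 * names are made up.  Note that ->map_sg follows the rule documented above:
 * it returns the number of mapped entries on success and 0 on error, never
 * a negative value.
 */
#if 0	/* example only */
static dma_addr_t foo_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	/* devices on this hypothetical bus see physical addresses 1:1 */
	return page_to_phys(page) + offset;
}

static int foo_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = foo_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg_dma_address(sg) == DMA_MAPPING_ERROR)
			return 0;	/* 0 on error, never negative */
		sg_dma_len(sg) = sg->length;
	}
	return nents;
}

static const struct dma_map_ops foo_dma_ops = {
	.map_page	= foo_map_page,
	.map_sg		= foo_map_sg,
};
#endif
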
#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
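
/*
 * Illustrative sketch (not part of this header): bus or IOMMU glue code
 * typically installs its ops with set_dma_ops(); every subsequent DMA API
 * call for the device is then dispatched through get_dma_ops().  The foo_*
 * names are made up and refer to the sketch above.
 */
#if 0	/* example only */
static void foo_bus_dma_configure(struct device *dev)
{
	set_dma_ops(dev, &foo_dma_ops);
}
#endif
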
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
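
/*
 * Illustrative sketch (not part of this header): an allocation path might
 * try the device's CMA area first and fall back to the page allocator,
 * relying on dma_free_contiguous() to release either kind of page.  The
 * foo_* names are made up.
 */
#if 0	/* example only */
static struct page *foo_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	struct page *page = dma_alloc_contiguous(dev, size, gfp);

	if (!page)	/* no CMA area, or CONFIG_DMA_CMA=n */
		page = alloc_pages(gfp, get_order(size));
	return page;
}

static void foo_free_buffer(struct device *dev, struct page *page, size_t size)
{
	dma_free_contiguous(dev, page, size);
}
#endif
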
#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
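
/*
 * Illustrative sketch (not part of this header): a platform driver with
 * device-local memory could declare it as coherent memory so that
 * dma_alloc_coherent() for this device is satisfied from that region.
 * The foo_* name, addresses and size are made up.
 */
#if 0	/* example only */
static int foo_probe(struct platform_device *pdev)
{
	/* 1 MiB of on-chip SRAM; bus address happens to equal CPU address */
	return dma_declare_coherent_memory(&pdev->dev, 0x90000000,
					   0x90000000, SZ_1M);
}
#endif
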
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
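
/*
 * Illustrative sketch (not part of this header): dma_map_ops implementations
 * that keep their buffers in the linear map commonly delegate ->mmap and
 * ->get_sgtable to the dma_common_*() helpers above.  The foo_* names are
 * made up.
 */
#if 0	/* example only */
static int foo_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int foo_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				      attrs);
}
#endif
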
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

/*
 * If there is no system cache pgprot, then fall back to the dmacoherent
 * pgprot, as the expectation is that the device is not coherent.
 */
#ifndef pgprot_syscached
#define pgprot_syscached(prot)		pgprot_dmacoherent(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
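
/*
 * Illustrative sketch (not part of this header): an ->mmap implementation
 * that maps the buffer itself would derive the vma protection through
 * dma_pgprot(), which applies pgprot_dmacoherent() when the device is not
 * DMA coherent.  Assumes, for simplicity, that dma_addr equals the physical
 * address; the foo_* name is made up.
 */
#if 0	/* example only */
static int foo_mmap_raw(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(dma_addr),
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
#endif
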
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
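
/*
 * Illustrative sketch (not part of this header): non-coherent dma_map_ops
 * implementations typically pair dev_is_dma_coherent() with the
 * arch_sync_dma_for_{device,cpu}() hooks around each transfer.  Assumes
 * dma_addr equals the physical address; the foo_* names are made up.
 */
#if 0	/* example only */
static void foo_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction dir)
{
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dma_handle, size, dir);
}

static void foo_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction dir)
{
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(dma_handle, size, dir);
}
#endif
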
#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
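
/*
 * Illustrative sketch (not part of this header): firmware bus glue (OF or
 * ACPI code, for instance) calls arch_setup_dma_ops() once a device's DMA
 * window and coherence are known, and arch_teardown_dma_ops() when the
 * device is released.  The window below is made up.
 */
#if 0	/* example only */
static void foo_configure_device_dma(struct device *dev, bool coherent)
{
	/* 0..4 GiB DMA window, no IOMMU */
	arch_setup_dma_ops(dev, 0, SZ_4G, NULL, coherent);
}
#endif
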
#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */