dma-direct.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;

/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
	phys_addr_t	cpu_start;
	dma_addr_t	dma_start;
	u64		size;
	u64		offset;
};
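
/*
 * Illustrative sketch, not part of the kernel header: a single region can
 * describe, say, RAM at CPU physical 0x100000000 that the bus sees at
 * address 0.  The values below are hypothetical; offset is simply
 * cpu_start - dma_start, so translate_phys_to_dma() below can return
 * paddr - offset.
 *
 *	static const struct bus_dma_region example_map[] = {
 *		{
 *			.cpu_start	= 0x100000000ULL,
 *			.dma_start	= 0,
 *			.size		= 0x80000000ULL,	(a 2 GiB window)
 *			.offset		= 0x100000000ULL,
 *		},
 *		{ },	(the zero-sized entry terminates the walk)
 *	};
 */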
static inline bool zone_dma32_is_empty(int node)
{
#ifdef CONFIG_ZONE_DMA32
	pg_data_t *pgdat = NODE_DATA(node);

	return zone_is_empty(&pgdat->node_zones[ZONE_DMA32]);
#else
	return true;
#endif
}

static inline bool zone_dma32_are_empty(void)
{
#ifdef CONFIG_NUMA
	int node;

	for_each_node(node)
		if (!zone_dma32_is_empty(node))
			return false;
#else
	if (!zone_dma32_is_empty(numa_node_id()))
		return false;
#endif

	return true;
}

static inline dma_addr_t translate_phys_to_dma(struct device *dev,
		phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
			return (dma_addr_t)paddr - m->offset;

	/* make sure dma_capable fails when no translation is available */
	return DMA_MAPPING_ERROR;
}

static inline phys_addr_t translate_dma_to_phys(struct device *dev,
		dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
			return (phys_addr_t)dma_addr + m->offset;

	return (phys_addr_t)-1;
}
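
/*
 * Worked example, continuing the hypothetical region sketched above: for
 * { .cpu_start = 0x100000000, .dma_start = 0, .size = 0x80000000,
 *   .offset = 0x100000000 }, translate_phys_to_dma() turns paddr
 * 0x100200000 into dma_addr 0x200000, and translate_dma_to_phys() maps it
 * back.  An address covered by no region yields DMA_MAPPING_ERROR
 * (resp. (phys_addr_t)-1), which makes dma_capable() below fail.
 */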
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted	phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
 * buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
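
/*
 * Sketch of the encryption-bit handling described above; the bit position
 * is illustrative, not taken from this header.  With AMD SME, sme_me_mask
 * holds the C-bit (e.g. bit 47), __sme_set() ORs it into the address and
 * __sme_clr() masks it back out:
 *
 *	dma_addr_t dma = phys_to_dma(dev, 0x1000);	(0x800000001000 if the C-bit is bit 47)
 *	phys_addr_t pa = dma_to_phys(dev, dma);		(0x1000 again)
 *
 * When memory encryption is not active sme_me_mask is zero and
 * phys_to_dma() behaves exactly like phys_to_dma_unencrypted().
 */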
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;

	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
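
/*
 * Illustrative example with hypothetical values: for a device with
 * *dev->dma_mask == DMA_BIT_MASK(32) and bus_dma_limit == 0 (i.e. no extra
 * bus limit), a 0x1000-byte buffer ending at dma address 0xffffffff is
 * accepted, while one starting at 0xffffff00 is rejected because its end,
 * 0x1000000eff, exceeds the 32-bit mask.  The min_low_pfn comparison above
 * additionally rejects RAM addresses that wrapped around below the start
 * of memory when dma_addr_t is only 32 bits wide.
 */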
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#endif /* _LINUX_DMA_DIRECT_H */