# SPDX-License-Identifier: GPL-2.0-only

config NO_DMA
	bool

config HAS_DMA
	bool
	depends on !NO_DMA
	default y

config DMA_OPS
	depends on HAS_DMA
	bool

#
# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
# mapping fast path should select this option and set the dma_ops_bypass
# flag in struct device where applicable
#
config DMA_OPS_BYPASS
	bool
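
# A minimal sketch of the driver side (illustrative only: the probe routine
# and the bypass condition below are hypothetical, but dma_ops_bypass is the
# struct device flag referred to above). Such a driver would also select
# DMA_OPS_BYPASS from its own Kconfig entry:
#
#	static int foo_iommu_probe_device(struct device *dev)
#	{
#		/* let dma-direct handle devices that can address all memory */
#		if (dma_get_mask(dev) == DMA_BIT_MASK(64))
#			dev->dma_ops_bypass = true;
#		return 0;
#	}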

config NEED_SG_DMA_LENGTH
	bool

config NEED_DMA_MAP_STATE
	bool

config ARCH_DMA_ADDR_T_64BIT
	def_bool 64BIT || PHYS_ADDR_T_64BIT

config ARCH_HAS_DMA_COHERENCE_H
	bool

config ARCH_HAS_DMA_SET_MASK
	bool

#
# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
# people think of when saying write combine, so very few platforms should
# need to enable this.
#
config ARCH_HAS_DMA_WRITE_COMBINE
	bool
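
# For reference, the attribute this is about is requested by drivers through
# dma_alloc_attrs(); a minimal sketch (the device, size and error handling
# are illustrative placeholders):
#
#	dma_addr_t dma_handle;
#	void *vaddr = dma_alloc_attrs(dev, SZ_64K, &dma_handle, GFP_KERNEL,
#				      DMA_ATTR_WRITE_COMBINE);
#	if (!vaddr)
#		return -ENOMEM;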

#
# Select if the architecture provides the arch_dma_mark_clean hook
#
config ARCH_HAS_DMA_MARK_CLEAN
	bool
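
# The hook an architecture then provides has roughly this shape (a sketch
# under that assumption, not a reference implementation; the body is left
# as a placeholder):
#
#	void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
#	{
#		/* mark the pages backing [paddr, paddr + size) as clean */
#	}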

config DMA_DECLARE_COHERENT
	bool

config ARCH_HAS_SETUP_DMA_OPS
	bool

config ARCH_HAS_TEARDOWN_DMA_OPS
	bool

config ARCH_HAS_SYNC_DMA_FOR_DEVICE
	bool

config ARCH_HAS_SYNC_DMA_FOR_CPU
	bool
	select NEED_DMA_MAP_STATE

config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
	bool

config ARCH_HAS_DMA_PREP_COHERENT
	bool

config ARCH_HAS_FORCE_DMA_UNENCRYPTED
	bool

config DMA_VIRT_OPS
	bool
	depends on HAS_DMA
	select DMA_OPS

config SWIOTLB
	bool
	select NEED_DMA_MAP_STATE

#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the pagetables
#
config DMA_NONCOHERENT_MMAP
	default y if !MMU
	bool
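
# What this ultimately allows is handing DMA buffers out to userspace with
# mmap(); a minimal sketch of the usual driver-side pattern (the foo_* names
# and the buffer bookkeeping are illustrative placeholders):
#
#	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
#	{
#		struct foo_dev *foo = file->private_data;
#
#		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
#					 foo->dma_addr, foo->size);
#	}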

config DMA_COHERENT_POOL
	select GENERIC_ALLOCATOR
	bool

config DMA_REMAP
	bool
	depends on MMU
	select DMA_NONCOHERENT_MMAP

config DMA_DIRECT_REMAP
	bool
	select DMA_REMAP
	select DMA_COHERENT_POOL

config DMA_CMA
	bool "DMA Contiguous Memory Allocator"
	depends on HAVE_DMA_CONTIGUOUS && CMA
	help
	  This enables the Contiguous Memory Allocator, which allows drivers
	  to allocate big physically-contiguous blocks of memory for use with
	  hardware components that support neither I/O mapping nor
	  scatter-gather.

	  You can disable CMA by specifying "cma=0" on the kernel's command
	  line.

	  For more information see <kernel/dma/contiguous.c>.
	  If unsure, say "n".
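
# For context, drivers do not call CMA directly; large allocations made
# through the ordinary coherent API are what end up backed by CMA when this
# option is enabled. A minimal sketch (the size and device are illustrative):
#
#	dma_addr_t dma_handle;
#	void *buf = dma_alloc_coherent(dev, SZ_16M, &dma_handle, GFP_KERNEL);
#	if (!buf)
#		return -ENOMEM;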

if DMA_CMA

config DMA_PERNUMA_CMA
	bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
	default NUMA && ARM64
	help
	  Enable this option to get per-NUMA CMA areas so that devices like
	  the ARM64 SMMU can get local memory via the DMA coherent APIs.

	  You can set the size of the per-NUMA CMA areas by specifying
	  "cma_pernuma=size" on the kernel's command line.

comment "Default contiguous memory area size:"

config CMA_SIZE_MBYTES
	int "Size in Mega Bytes"
	depends on !CMA_SIZE_SEL_PERCENTAGE
	default 0 if X86
	default 16
	help
	  Defines the size (in MiB) of the default memory area for the
	  Contiguous Memory Allocator. If a size of 0 is selected, CMA is
	  disabled by default, but it can be enabled by passing cma=size[MG]
	  to the kernel.

config CMA_SIZE_PERCENTAGE
	int "Percentage of total memory"
	depends on !CMA_SIZE_SEL_MBYTES
	default 0 if X86
	default 10
	help
	  Defines the size of the default memory area for the Contiguous
	  Memory Allocator as a percentage of the total memory in the system.
	  If 0 percent is selected, CMA is disabled by default, but it can be
	  enabled by passing cma=size[MG] to the kernel.

choice
	prompt "Selected region size"
	default CMA_SIZE_SEL_MBYTES

config CMA_SIZE_SEL_MBYTES
	bool "Use mega bytes value only"

config CMA_SIZE_SEL_PERCENTAGE
	bool "Use percentage value only"

config CMA_SIZE_SEL_MIN
	bool "Use lower value (minimum)"

config CMA_SIZE_SEL_MAX
	bool "Use higher value (maximum)"

endchoice

config CMA_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
	range 2 12
	default 8
	help
	  The DMA mapping framework by default aligns all buffers to the
	  smallest PAGE_SIZE order which is greater than or equal to the
	  requested buffer size. This works well for buffers up to a few
	  hundred kilobytes, but for larger buffers it is just a waste of
	  memory. With this parameter you can specify the maximum PAGE_SIZE
	  order for contiguous buffers. Larger buffers will be aligned only
	  to this specified order. The order is expressed as a power of two
	  multiplied by the PAGE_SIZE.

	  For example, if your system defaults to 4KiB pages, the order value
	  of 8 means that the buffers will be aligned up to 1MiB only.

	  If unsure, leave the default value "8".

endif

config DMA_API_DEBUG
	bool "Enable debugging of DMA-API usage"
	select NEED_DMA_MAP_STATE
	help
	  Enable this option to debug the use of the DMA API by device drivers.
	  With this option you will be able to detect common bugs in device
	  drivers, like double-freeing of DMA mappings or freeing mappings that
	  were never allocated.

	  This option causes a performance degradation. Use it only if you
	  want to debug device drivers and DMA interactions.

	  If unsure, say N.
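
# A short illustration of the bug class this option reports: unmapping the
# same mapping twice (the surrounding driver code is hypothetical):
#
#	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
#
#	if (dma_mapping_error(dev, addr))
#		return -ENOMEM;
#	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
#	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);	/* bug: double unmap */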

config DMA_API_DEBUG_SG
	bool "Debug DMA scatter-gather usage"
	default y
	depends on DMA_API_DEBUG
	help
	  Perform extra checking that callers of dma_map_sg() have respected the
	  appropriate segment length/boundary limits for the given device when
	  preparing DMA scatterlists.

	  This is particularly likely to have been overlooked in cases where the
	  dma_map_sg() API is used for general bulk mapping of pages rather than
	  preparing literal scatter-gather descriptors, where there is a risk of
	  unexpected behaviour from DMA API implementations if the scatterlist
	  is technically out-of-spec.

	  If unsure, say N.
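
# The limits being checked are the DMA parameters a driver declares up front;
# a minimal sketch of the expected pattern (the sg_table and the chosen
# values are illustrative, and dev->dma_parms is assumed to have been set up
# by the bus code):
#
#	dma_set_max_seg_size(dev, SZ_64K);
#	dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
#
#	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
#	if (!nents)
#		return -EIO;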