dma-iommu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */
#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}
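
/* Maps and unmaps a scatter/gather list by building TCE entries in the
 * device's IOMMU table for each segment.
 */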
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       unsigned long attrs)
{
	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
			   direction, attrs);
}
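
/* Ask the PCI host bridge whether this device may skip the IOMMU and use
 * direct DMA for the given mask.
 */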
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
		return false;
	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
		dev->dma_ops_bypass = true;
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		return 1;
	}

	if (!tbl) {
		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
			 mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
	dev->dma_ops_bypass = false;
	return 1;
}
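
/* Return the DMA mask a device needs to address the whole IOMMU window,
 * or the direct-mapping mask when the bypass path is usable.
 */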
u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (dev_is_pci(dev)) {
		u64 bypass_mask = dma_direct_get_required_mask(dev);

		if (dma_iommu_dma_supported(dev, bypass_mask)) {
			dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
			return bypass_mask;
		}
	}

	if (!tbl)
		return 0;

	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
			tbl->it_page_shift - 1);
	mask += mask - 1;

	return mask;
}
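
/* DMA operations installed for devices that sit behind a powerpc IOMMU. */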
const struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
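
/*
 * Illustrative sketch, not part of the original file: a driver whose device
 * sits behind this IOMMU never calls the functions above directly; it reaches
 * them through the generic DMA API. The device, buffer, and helper below are
 * hypothetical and shown only to make the call path concrete.
 */
#if 0	/* example only */
#include <linux/dma-mapping.h>

static int example_dma_usage(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Lands in dma_iommu_map_page() unless the bypass path is active. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device to DMA from 'handle' and wait for it ... */

	/* Tears the TCEs back down via dma_iommu_unmap_page(). */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif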