swiotlb-xen.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also providing a mechanism for contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

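/*
 * Editorial note: the helpers below convert between three address spaces:
 *  - pseudo-physical addresses (what Linux treats as "physical", in PFNs),
 *  - machine/bus addresses (the real frame numbers, in BFNs/MFNs), and
 *  - DMA addresses as seen by the device (via phys_to_dma()/dma_to_phys()).
 * For instance, xen_phys_to_dma() composes to
 *	phys_to_dma(dev, (pfn_to_bfn(XEN_PFN_DOWN(paddr)) << XEN_PAGE_SHIFT) |
 *		    (paddr & ~XEN_PAGE_MASK));
 */
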
/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

        baddr |= paddr & ~XEN_PAGE_MASK;
        return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
                                          phys_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
                            (baddr & ~XEN_PAGE_MASK);

        return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
                                          dma_addr_t dma_addr)
{
        return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
        return xen_phys_to_dma(dev, virt_to_phys(address));
}

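/*
 * Check whether the machine frames backing [p, p + size) are contiguous.
 * Returns 1 if any gap is found, in which case callers either bounce the
 * buffer or exchange it for a machine-contiguous region.
 */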
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
        unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

        next_bfn = pfn_to_bfn(xen_pfn);

        for (i = 1; i < nr_pages; i++)
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 1;

        return 0;
}

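/*
 * Tell whether a DMA address points into the Xen swiotlb bounce buffer.
 * The bus frame is translated back to a local PFN first; frames that do not
 * belong to this domain are never treated as part of the buffer (see the
 * comment inside).
 */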
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr))) {
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

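/*
 * Exchange the pages backing the IO TLB, one IO_TLB_SEGSIZE-sized chunk at a
 * time, for machine-contiguous memory addressable within dma_bits. If the
 * hypervisor cannot satisfy a request, the mask is widened one bit at a time
 * up to MAX_DMA_BITS before giving up.
 */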
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}

static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"\
                       "You either: don't have the permissions, do not have"\
                       " enough free memory under 4GB, or the hypervisor memory"\
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}

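/*
 * Allocate the IO TLB (from memblock when "early", otherwise from the page
 * allocator), exchange it for memory the hypervisor guarantees to be
 * machine-contiguous and below 4GB, and hand it to the core swiotlb code.
 * On failure the requested size is halved (never below 2MB) and the whole
 * sequence is retried up to three times.
 */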
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

        /*
         * IO TLB memory already allocated. Just use it.
         */
        if (io_tlb_start != 0) {
                xen_io_tlb_start = phys_to_virt(io_tlb_start);
                goto end;
        }

        /*
         * Get IO TLB memory from any location.
         */
        if (early) {
                xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
                                                  PAGE_SIZE);
                if (!xen_io_tlb_start)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
        } else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }

        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        memblock_free(__pa(xen_io_tlb_start),
                                      PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }

        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        if (!rc)
                swiotlb_set_max_segment(PAGE_SIZE);

        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}

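/*
 * Coherent allocations: if the buffer that comes back already fits under the
 * device's coherent DMA mask and is machine-contiguous, it is used as-is.
 * Otherwise it is exchanged with the hypervisor for a contiguous region and
 * the page is marked PageXenRemapped so xen_swiotlb_free_coherent() knows to
 * undo the exchange on free.
 */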
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-physical memory layout has nothing to do with the
         * machine physical layout. We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* Convert the size to the size actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* At this point dma_handle is the dma address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = dma_to_phys(hwdev, *dma_handle);
        dev_addr = xen_phys_to_dma(hwdev, phys);
        if (((dev_addr + size - 1 <= dma_mask)) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
                *dma_handle = phys_to_dma(hwdev, *dma_handle);
                SetPageXenRemapped(virt_to_page(ret));
        }
        memset(ret, 0, size);
        return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, unsigned long attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);
        struct page *page;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* Do not use virt_to_phys because on ARM it doesn't return the
         * physical address. */
        phys = xen_dma_to_phys(hwdev, dev_addr);

        /* Convert the size to the size actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        if (is_vmalloc_addr(vaddr))
                page = vmalloc_to_page(vaddr);
        else
                page = virt_to_page(vaddr);

        if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
                     range_straddles_page_boundary(phys, size)) &&
            TestClearPageXenRemapped(page))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
                                attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size, true) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            swiotlb_force != SWIOTLB_FORCE)
                goto done;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        phys = map;
        dev_addr = xen_phys_to_dma(dev, map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, map, size, size, dir,
                                attrs | DMA_ATTR_SKIP_CPU_SYNC);
                return DMA_MAPPING_ERROR;
        }

done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
                        arch_sync_dma_for_device(phys, size, dir);
                else
                        xen_dma_sync_for_device(dev, dev_addr, size, dir);
        }
        return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
        }

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(hwdev, dev_addr))
                swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

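/*
 * The sync helpers below do two things: architecture-level cache maintenance
 * for non-coherent devices, and, when the DMA address lands in the Xen
 * swiotlb, copying between the bounce slot and the original buffer via
 * swiotlb_tbl_sync_single().
 */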
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
        }

        if (is_xen_swiotlb_buffer(dev, dma_addr))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

        if (is_xen_swiotlb_buffer(dev, dma_addr))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
                        arch_sync_dma_for_device(paddr, size, dir);
                else
                        xen_dma_sync_for_device(dev, dma_addr, size, dir);
        }
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
                                dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nelems;
out_unmap:
        xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        sg_dma_len(sgl) = 0;
        return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                            int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
                                sg->length, dir);
        }
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                               int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
                                sg->length, dir);
        }
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg,
        .unmap_sg = xen_swiotlb_unmap_sg,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
        .alloc_pages = dma_common_alloc_pages,
        .free_pages = dma_common_free_pages,
};
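
/*
 * Usage sketch (illustrative only, not part of the original file): once a
 * device's dma_map_ops points at xen_swiotlb_dma_ops, an ordinary driver
 * call such as
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *
 * is dispatched to xen_swiotlb_map_page() above, which returns the
 * machine/DMA address directly when it can and bounces through the Xen
 * swiotlb otherwise.
 */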