xrp_cma_alloc.c

/*
 * Copyright (c) 2017 Cadence Design Systems Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Alternatively you can use and distribute this file under the terms of
 * the GNU General Public License version 2 or later.
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
#include <linux/dma-mapping.h>
#else
#include <linux/dma-direct.h>
#endif
#include <linux/kernel.h>
#include <linux/slab.h>
#include "xrp_cma_alloc.h"
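
/*
 * CMA-backed allocation record.  Embeds the generic struct xrp_allocation
 * and remembers the value returned by dma_alloc_attrs() (with
 * DMA_ATTR_NO_KERNEL_MAPPING this may be an opaque cookie rather than a
 * usable kernel mapping) so it can be handed back to dma_free_attrs().
 */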
struct xrp_cma_allocation {
	struct xrp_allocation allocation;
	void *kvaddr;
};
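
/*
 * A CMA allocation pool bound to one device: all DMA API calls for
 * allocations from this pool go through pool->dev.
 */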
struct xrp_cma_pool {
	struct xrp_allocation_pool pool;
	struct device *dev;
};
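
/*
 * Allocate a physically contiguous buffer from the device's CMA/DMA area.
 * The requested size is rounded up to a whole number of pages and the
 * buffer is obtained through the DMA API with DMA_ATTR_NO_KERNEL_MAPPING
 * (kernels before 4.8 pass attributes through a struct dma_attrs instead
 * of a plain flag).  The @align argument is not used here: DMA allocations
 * are already page aligned.  On success the new xrp_allocation records the
 * buffer's physical address and holds one reference.
 */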
static long xrp_cma_alloc(struct xrp_allocation_pool *allocation_pool,
			  u32 size, u32 align, struct xrp_allocation **alloc)
{
	struct xrp_cma_pool *pool = container_of(allocation_pool,
						 struct xrp_cma_pool, pool);
	struct xrp_cma_allocation *new_cma;
	struct xrp_allocation *new;
	dma_addr_t dma_addr;
	void *kvaddr;

	size = ALIGN(size, PAGE_SIZE);
	new_cma = kzalloc(sizeof(struct xrp_cma_allocation), GFP_KERNEL);
	if (!new_cma)
		return -ENOMEM;

	new = &new_cma->allocation;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		kvaddr = dma_alloc_attrs(pool->dev, size, &dma_addr,
					 GFP_KERNEL, &attrs);
	}
#else
	kvaddr = dma_alloc_attrs(pool->dev, size, &dma_addr, GFP_KERNEL,
				 DMA_ATTR_NO_KERNEL_MAPPING);
#endif
	if (!kvaddr) {
		kfree(new_cma);
		return -ENOMEM;
	}

	new->pool = allocation_pool;
	new->start = dma_to_phys(pool->dev, dma_addr);
	new->size = size;
	atomic_set(&new->ref, 0);
	xrp_allocation_get(new);
	new_cma->kvaddr = kvaddr;
	*alloc = new;
	return 0;
}
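
/*
 * Release a CMA allocation: hand the buffer back to the DMA API with the
 * same DMA_ATTR_NO_KERNEL_MAPPING attribute it was allocated with, then
 * free the bookkeeping structure.
 */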
static void xrp_cma_free(struct xrp_allocation *xrp_allocation)
{
	struct xrp_cma_pool *pool = container_of(xrp_allocation->pool,
						 struct xrp_cma_pool, pool);
	struct xrp_cma_allocation *a = container_of(xrp_allocation,
						    struct xrp_cma_allocation,
						    allocation);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	dma_free_attrs(pool->dev, xrp_allocation->size,
		       a->kvaddr,
		       phys_to_dma(pool->dev, xrp_allocation->start),
		       &attrs);
#else
	dma_free_attrs(pool->dev, xrp_allocation->size,
		       a->kvaddr,
		       phys_to_dma(pool->dev, xrp_allocation->start),
		       DMA_ATTR_NO_KERNEL_MAPPING);
#endif
	kfree(a);
}
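
/* Tear down the pool itself; individual allocations are freed separately. */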
static void xrp_cma_free_pool(struct xrp_allocation_pool *allocation_pool)
{
	struct xrp_cma_pool *pool = container_of(allocation_pool,
						 struct xrp_cma_pool, pool);

	kfree(pool);
}
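
/* For CMA allocations the "offset" is simply the physical start address. */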
static phys_addr_t xrp_cma_offset(const struct xrp_allocation *allocation)
{
	return allocation->start;
}
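
/* Operations plugged into the generic XRP allocation-pool interface. */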
static const struct xrp_allocation_ops xrp_cma_pool_ops = {
	.alloc = xrp_cma_alloc,
	.free = xrp_cma_free,
	.free_pool = xrp_cma_free_pool,
	.offset = xrp_cma_offset,
};
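
/*
 * Create a CMA-backed allocation pool for @dev and return it through @ppool.
 * Returns 0 on success or -ENOMEM if the pool structure cannot be allocated.
 */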
long xrp_init_cma_pool(struct xrp_allocation_pool **ppool, struct device *dev)
{
	struct xrp_cma_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return -ENOMEM;

	pool->pool.ops = &xrp_cma_pool_ops;
	pool->dev = dev;
	*ppool = &pool->pool;
	return 0;
}
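
/*
 * Typical usage (illustrative sketch only; the XRP core normally drives the
 * pool through its own helpers, so the direct ops calls below are just an
 * assumption based on the interface visible in this file):
 *
 *	struct xrp_allocation_pool *pool;
 *	struct xrp_allocation *alloc;
 *	long ret;
 *
 *	ret = xrp_init_cma_pool(&pool, dev);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = pool->ops->alloc(pool, size, align, &alloc);
 *	if (ret == 0) {
 *		phys_addr_t addr = pool->ops->offset(alloc);
 *
 *		// ... map or hand @addr to the device, then ...
 *		pool->ops->free(alloc);
 *	}
 *	pool->ops->free_pool(pool);
 */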