sched_fence.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>
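
/* Backing slab cache for all scheduler fences, set up at module init. */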
static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}
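
/*
 * Wait for any pending drm_sched_fence_free() RCU callbacks before the
 * slab cache is destroyed, so no fence is freed into a dead cache.
 */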
static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}
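
/**
 * drm_sched_fence_scheduled - signal the scheduled fence
 *
 * @fence: scheduler fence of the job
 *
 * Signal &drm_sched_fence.scheduled; the trace output only notes whether
 * the fence was signaled here or had already been signaled.
 */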
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->scheduled);

	if (!ret)
		DMA_FENCE_TRACE(&fence->scheduled,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->scheduled,
				"was already signaled\n");
}
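
/**
 * drm_sched_fence_finished - signal the finished fence
 *
 * @fence: scheduler fence of the job
 *
 * Signal &drm_sched_fence.finished; the trace output only notes whether
 * the fence was signaled here or had already been signaled.
 */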
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->finished);

	if (!ret)
		DMA_FENCE_TRACE(&fence->finished,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->finished,
				"was already signaled\n");
}
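
/*
 * Name callbacks shared by both embedded fences: the driver name is the
 * scheduler itself, the timeline name is the drm_gpu_scheduler the fence
 * was created on.
 */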
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}
/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	kmem_cache_free(sched_fence_slab, fence);
}
/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}
/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}
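
/*
 * The two fence types share the name callbacks and differ only in their
 * release path: releasing the scheduled fence frees the whole object via
 * RCU, releasing the finished fence just drops its reference to the
 * scheduled fence.
 */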
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};
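
/**
 * to_drm_sched_fence - cast a dma_fence to its containing drm_sched_fence
 *
 * @f: fence to cast
 *
 * Returns the drm_sched_fence embedding @f if it is a scheduler fence,
 * or NULL if @f comes from somewhere else.
 */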
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
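
/**
 * drm_sched_fence_create - create a new scheduler fence
 *
 * @entity: scheduler entity the job is pushed to
 * @owner: opaque pointer recorded in the fence for the submitter
 *
 * Allocate a drm_sched_fence from the slab cache and initialize both
 * embedded fences with the same sequence number, the scheduled fence on
 * the entity's fence context and the finished fence on fence context + 1.
 * Returns NULL if the allocation fails.
 */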
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
					       void *owner)
{
	struct drm_sched_fence *fence = NULL;
	unsigned seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	fence->sched = entity->rq->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);

	return fence;
}
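
/*
 * Illustrative sketch only, not part of the upstream file: roughly how a
 * caller could drive the two fences over the lifetime of a single job.
 * The @entity and @owner values are assumed to come from the submitter;
 * job bookkeeping and error handling are omitted, and the function name
 * is hypothetical.
 */
static void __maybe_unused drm_sched_fence_usage_sketch(struct drm_sched_entity *entity,
							void *owner)
{
	struct drm_sched_fence *fence = drm_sched_fence_create(entity, owner);

	if (!fence)
		return;

	/* The job was picked from the entity queue and handed to the hardware. */
	drm_sched_fence_scheduled(fence);

	/* ... hardware runs the job ... */

	/* The job completed, wake up anyone waiting on the finished fence. */
	drm_sched_fence_finished(fence);

	/*
	 * Drop the creation reference on the finished fence; its release
	 * callback drops the scheduled fence in turn, which frees the whole
	 * object after an RCU grace period.
	 */
	dma_fence_put(&fence->finished);
}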
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");