vha_plat_thead_light.c

/*!
 *****************************************************************************
 *
 * @File       vha_plat_thead_light.c
 * ---------------------------------------------------------------------------
 *
 * Copyright (C) 2020 Alibaba Group Holding Limited
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>		/* readq()/writeq() */
#include <linux/pm_runtime.h>

#include <img_mem_man.h>
#include "vha_plat.h"
#include "vha_plat_dt.h"

const struct of_device_id vha_plat_dt_of_ids[] = {
	{ .compatible = "img,ax3386-nna" },
	//{ .compatible = VHA_PLAT_DT_OF_ID },
	{ }
};
static struct heap_config example_heap_configs[] = {
	{
		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
		.options.unified = {
			.gfp_type = GFP_KERNEL | __GFP_ZERO,
		},
		.to_dev_addr = NULL,
	},
	{
		.type = IMG_MEM_HEAP_TYPE_DMABUF,
		.to_dev_addr = NULL,
	},
	{
		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
		.to_dev_addr = NULL,
	},
};
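
/*
 * Note on the heaps above (a descriptive gloss based on the img_mem_man
 * heap types in general, not taken from the original file): UNIFIED
 * allocates kernel pages with the given GFP flags, DMABUF imports
 * externally allocated dma-buf handles, and ANONYMOUS wraps anonymous
 * user-space mappings. A NULL .to_dev_addr means no extra CPU-to-device
 * address translation is applied on this platform.
 */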
struct npu_plat_if {
	struct clk *npu_pclk;
	struct clk *npu_aclk;
};

static struct npu_plat_if *g_npi;

/*
 * IO hooks.
 * NOTE: a customer may want to use a spinlock to avoid
 * problems with multi-threaded IO access.
 */
uint64_t vha_plat_read64(void *addr)
{
	return readq((volatile void __iomem *)addr);
}

void vha_plat_write64(void *addr, uint64_t val)
{
	writeq(val, (volatile void __iomem *)addr);
}
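
/*
 * A minimal sketch of the spinlock variant suggested by the NOTE above
 * (an assumption, not part of the original driver; the lock name is
 * hypothetical). Kept commented out so it does not shadow the accessors
 * above:
 *
 *	static DEFINE_SPINLOCK(vha_io_lock);
 *
 *	uint64_t vha_plat_read64(void *addr)
 *	{
 *		unsigned long flags;
 *		uint64_t val;
 *
 *		spin_lock_irqsave(&vha_io_lock, flags);
 *		val = readq((volatile void __iomem *)addr);
 *		spin_unlock_irqrestore(&vha_io_lock, flags);
 *		return val;
 *	}
 *
 *	void vha_plat_write64(void *addr, uint64_t val)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vha_io_lock, flags);
 *		writeq(val, (volatile void __iomem *)addr);
 *		spin_unlock_irqrestore(&vha_io_lock, flags);
 *	}
 */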
int vha_plat_dt_hw_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;
	uint64_t dma_mask;

	dev_dbg(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
	if (dev->dma_mask) {
		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
			 __func__, dev->dma_mask, *dev->dma_mask);
	} else {
		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	/* Try an alternative dma_mask setting from the device tree */
	if (!of_property_read_u64(pdev->dev.of_node, "dma-mask", &dma_mask)) {
		dev_info(dev, "%s forcing custom mask from DT : %#llx\n",
			 __func__, dma_mask);
	} else {
		/* If no alternative mask is defined in the DT "dma-mask"
		 * property, keep the default one (32-bit) */
		dma_mask = dma_get_mask(dev);
	}

	ret = dma_set_mask(dev, dma_mask);
	if (ret) {
		dev_err(dev, "%s failed to set dma mask\n", __func__);
		return ret;
	}

	/* Get clock domain, voltage regulator, set clock rate, etc. */
	g_npi = devm_kzalloc(&pdev->dev, sizeof(*g_npi), GFP_KERNEL);
	if (!g_npi)
		return -ENOMEM;

	g_npi->npu_pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(g_npi->npu_pclk)) {
		dev_warn(&pdev->dev, "failed to get npu pclk\n");
		g_npi->npu_pclk = NULL;
	}

	g_npi->npu_aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(g_npi->npu_aclk)) {
		dev_warn(&pdev->dev, "failed to get npu aclk\n");
		g_npi->npu_aclk = NULL;
	}

	return 0;
}
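
/*
 * Illustrative device-tree node for this driver (an assumption based on
 * the properties the code above reads, not taken from an actual .dts;
 * the node name, addresses, and clock provider are hypothetical):
 *
 *	npu@ffef040000 {
 *		compatible = "img,ax3386-nna";
 *		reg = <0xff 0xef040000 0x0 0x100000>;
 *		clocks = <&clkgen 10>, <&clkgen 11>;
 *		clock-names = "pclk", "aclk";
 *		dma-mask = /bits/ 64 <0xffffffffff>;
 *	};
 *
 * "clock-names" supplies the "pclk"/"aclk" handles fetched in
 * vha_plat_dt_hw_init(), and the optional "dma-mask" property overrides
 * the default 32-bit DMA mask.
 */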
/* Return the platform's global heaps */
void vha_plat_dt_get_heaps(struct heap_config **heap_configs, int *num_heaps)
{
	*heap_configs = example_heap_configs;
	*num_heaps = ARRAY_SIZE(example_heap_configs);
}
static int vha_plat_dt_clk_prepare_enable(struct npu_plat_if *npi)
{
	int ret;

	if (npi->npu_pclk) {
		ret = clk_prepare_enable(npi->npu_pclk);
		if (ret)
			return ret;
	}

	if (npi->npu_aclk) {
		ret = clk_prepare_enable(npi->npu_aclk);
		if (ret) {
			clk_disable_unprepare(npi->npu_pclk);
			return ret;
		}
	}

	return 0;
}

static void vha_plat_dt_clk_disable_unprepare(struct npu_plat_if *npi)
{
	if (npi->npu_aclk)
		clk_disable_unprepare(npi->npu_aclk);
	if (npi->npu_pclk)
		clk_disable_unprepare(npi->npu_pclk);
}
void vha_plat_dt_hw_destroy(struct platform_device *pdev)
{
	/* Put any vendor-specific teardown here:
	 * release the clock domain, voltage regulator, etc.
	 * The clocks acquired in vha_plat_dt_hw_init() are devm-managed,
	 * so they need no explicit put. */
}
int vha_plat_dt_hw_suspend(struct platform_device *pdev)
{
	vha_plat_dt_clk_disable_unprepare(g_npi);
	return 0;
}

int vha_plat_dt_hw_resume(struct platform_device *pdev)
{
	int ret;

	ret = vha_plat_dt_clk_prepare_enable(g_npi);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable npu clock (%d)\n", ret);
		return ret;
	}

	return 0;
}

//MODULE_DEVICE_TABLE(of, vha_plat_dt_of_ids);
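
/*
 * Sketch of how the suspend/resume hooks above could back standard PM
 * callbacks (an assumption: the core vha driver, not this file, registers
 * the platform_driver, so this wiring is illustrative only and the wrapper
 * names are hypothetical):
 *
 *	static int vha_plat_pm_suspend(struct device *dev)
 *	{
 *		return vha_plat_dt_hw_suspend(to_platform_device(dev));
 *	}
 *
 *	static int vha_plat_pm_resume(struct device *dev)
 *	{
 *		return vha_plat_dt_hw_resume(to_platform_device(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(vha_plat_pm_ops,
 *				 vha_plat_pm_suspend, vha_plat_pm_resume);
 */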