/*!
 *****************************************************************************
 *
 * @File       vha_plat_thead_light_fpga_c910.c
 * ---------------------------------------------------------------------------
 *
 * Copyright (C) 2020 Alibaba Group Holding Limited
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/io.h>              /* readq()/writeq() */
#include <linux/platform_device.h> /* struct platform_device */

#include <img_mem_man.h>
#include "vha_plat.h"
#include "vha_plat_dt.h"

const struct of_device_id vha_plat_dt_of_ids[] = {
	{ .compatible = "img,ax3386-nna" },
	//{ .compatible = VHA_PLAT_DT_OF_ID },
	{ }
};

static struct heap_config example_heap_configs[] = {
	{
		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
		.options.unified = {
			.gfp_type = GFP_KERNEL | __GFP_ZERO,
		},
		.to_dev_addr = NULL,
	},
	{
		.type = IMG_MEM_HEAP_TYPE_DMABUF,
		.to_dev_addr = NULL,
	},
	{
		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
		.to_dev_addr = NULL,
	},
};

/*
 * IO hooks.
 * NOTE: customers may want to use a spinlock to avoid
 * problems with multi-threaded IO access; a locked variant
 * is sketched after the accessors below.
 */
uint64_t vha_plat_read64(void *addr)
{
	return readq((volatile void __iomem *)addr);
}

void vha_plat_write64(void *addr, uint64_t val)
{
	writeq(val, (volatile void __iomem *)addr);
}
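
/*
 * A minimal sketch of spinlock-protected replacements for the accessors
 * above, as the NOTE suggests. Kept under #if 0 because it is illustrative
 * only; the vha_io_lock name is an assumption, not part of this driver,
 * and <linux/spinlock.h> would be needed when enabling it.
 */
#if 0
static DEFINE_SPINLOCK(vha_io_lock); /* hypothetical lock */

uint64_t vha_plat_read64(void *addr)
{
	unsigned long flags;
	uint64_t val;

	spin_lock_irqsave(&vha_io_lock, flags);
	val = readq((volatile void __iomem *)addr);
	spin_unlock_irqrestore(&vha_io_lock, flags);
	return val;
}

void vha_plat_write64(void *addr, uint64_t val)
{
	unsigned long flags;

	spin_lock_irqsave(&vha_io_lock, flags);
	writeq(val, (volatile void __iomem *)addr);
	spin_unlock_irqrestore(&vha_io_lock, flags);
}
#endif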

int vha_plat_dt_hw_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;
	uint64_t dma_mask;

	dev_dbg(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
	if (dev->dma_mask) {
		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
				__func__, dev->dma_mask, *dev->dma_mask);
	} else {
		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	/* Try an alternative dma_mask setting from the device tree */
	if (!of_property_read_u64(pdev->dev.of_node, "dma-mask", &dma_mask)) {
		dev_info(dev, "%s forcing custom mask from DT : %#llx\n",
				__func__, dma_mask);
	} else {
		/* If no alternative mask is defined in the DT
		 * "dma-mask" property, use the default (32-bit) one */
		dma_mask = dma_get_mask(dev);
	}

	ret = dma_set_mask(dev, dma_mask);
	if (ret) {
		dev_err(dev, "%s failed to set dma mask\n", __func__);
		return ret;
	}

	/* Put any vendor-related code here:
	 * get clock domain, voltage regulator, set clock rate, etc. */
	return 0;
}
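
/*
 * Example (hypothetical) device tree node consumed by the init path above;
 * the optional "dma-mask" property overrides the default mask. Addresses
 * and sizes are illustrative only:
 *
 *	nna@fff00000 {
 *		compatible = "img,ax3386-nna";
 *		reg = <0xfff00000 0x100000>;
 *		dma-mask = /bits/ 64 <0xffffffffff>;
 *	};
 */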

/* Return the platform global heaps */
void vha_plat_dt_get_heaps(struct heap_config **heap_configs, int *num_heaps)
{
	*heap_configs = example_heap_configs;
	*num_heaps = ARRAY_SIZE(example_heap_configs);
}

void vha_plat_dt_hw_destroy(struct platform_device *pdev)
{
	/* Put any vendor-related code here:
	 * put clock domain, voltage regulator, etc. */
}
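
/*
 * A minimal sketch of the vendor clock handling mentioned in the init and
 * destroy comments, assuming a single clock described in the device tree.
 * The "nna" clock name and vha_clk handle are hypothetical; <linux/clk.h>
 * would be needed when enabling it.
 */
#if 0
static struct clk *vha_clk;

static int vha_plat_clk_init(struct platform_device *pdev)
{
	/* Hypothetical clock name; match it to the DT "clock-names" entry */
	vha_clk = devm_clk_get(&pdev->dev, "nna");
	if (IS_ERR(vha_clk))
		return PTR_ERR(vha_clk);

	/* Gate the clock on; undone with clk_disable_unprepare() below */
	return clk_prepare_enable(vha_clk);
}

static void vha_plat_clk_destroy(struct platform_device *pdev)
{
	clk_disable_unprepare(vha_clk);
}
#endif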

int vha_plat_dt_hw_suspend(struct platform_device *pdev)
{
	/* This is the place where vendor-specific code shall be called,
	 * e.g. turn off the voltage regulator / disable the power domain
	 * (a regulator-based sketch follows vha_plat_dt_hw_resume() below) */
	return 0;
}

int vha_plat_dt_hw_resume(struct platform_device *pdev)
{
	/* This is the place where vendor-specific code shall be called,
	 * e.g. turn on the voltage regulator / enable the power domain */
	return 0;
}
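
/*
 * A minimal sketch of regulator-backed suspend/resume bodies, as the
 * comments above suggest. The vha_regulator handle is hypothetical (it
 * would be obtained with devm_regulator_get() in vha_plat_dt_hw_init());
 * <linux/regulator/consumer.h> would be needed when enabling it.
 */
#if 0
static struct regulator *vha_regulator;

int vha_plat_dt_hw_suspend(struct platform_device *pdev)
{
	/* Cut power to the NNA core while suspended */
	return regulator_disable(vha_regulator);
}

int vha_plat_dt_hw_resume(struct platform_device *pdev)
{
	/* Restore power before the core is accessed again */
	return regulator_enable(vha_regulator);
}
#endif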

//MODULE_DEVICE_TABLE(of, vha_plat_dt_of_ids);