vha_plat_dummy.c
/*****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>		/* kcalloc()/kfree() used below */
#include <linux/vmalloc.h>

#include "vha_common.h"
#include "vha_plat.h"
#include "uapi/version.h"
#include "vha_regs.h"
#if defined(CFG_SYS_MAGNA)
#include "hwdefs/vha_cr_magna.h"
#endif
#define DEVICE_NAME "vha"

static unsigned short min_alloc_order;
module_param(min_alloc_order, ushort, 0444);
MODULE_PARM_DESC(min_alloc_order,
	"Minimum allocation order, depends on PAGE_SIZE, "
	"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");

static unsigned short max_alloc_order = 10; /* 4MB for PAGE_SIZE=4kB */
module_param(max_alloc_order, ushort, 0444);
MODULE_PARM_DESC(max_alloc_order,
	"Maximum allocation order, depends on PAGE_SIZE, "
	"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");

static unsigned char num_clusters = 1;
module_param(num_clusters, byte, 0444);
MODULE_PARM_DESC(num_clusters,
	"Number of dummy clusters. Each cluster will be instantiated "
	"as separate /dev/vhaN node. Max number supported is 255.");

static struct heap_config dummy_heap_configs[] = {
	/* the first config is default */
	{
		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
		.options.unified = {
			.gfp_type = GFP_KERNEL | __GFP_ZERO,
		},
		.to_dev_addr = NULL,
	},
	{
		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
	},
#ifdef CONFIG_DMA_SHARED_BUFFER
	{
		.type = IMG_MEM_HEAP_TYPE_DMABUF,
		.to_dev_addr = NULL,
	},
#endif
};
static size_t num_dummy_heaps = ARRAY_SIZE(dummy_heap_configs);

/* Size of the emulated register file: the core register block plus, when
 * the hwdefs define it, the NNSYS block. */
static const size_t nna_regs_size =
#ifdef _REG_NNSYS_SIZE
	_REG_NNSYS_SIZE +
#endif
	_REG_SIZE;

static bool use_dummy_regs = false;

/* IO hooks - access the dummy register block when one was allocated,
 * otherwise do nothing */
uint64_t vha_plat_read64(void *addr)
{
	if (use_dummy_regs)
		return *((uint64_t *)addr);
	return 0ULL;
}

void vha_plat_write64(void *addr, uint64_t val)
{
	if (use_dummy_regs)
		*((uint64_t *)addr) = val;
}

#if defined(CFG_SYS_MAGNA)
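/*
 * Populate the emulated Magna register file with reset defaults: every
 * core and SOCM buffer is marked unallocated to any workload manager,
 * which is presumably what software expects to read after a reset of a
 * real Magna cluster.
 */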
static void vha_plat_magna_write_defaults(uint8_t *nna_regs)
{
	if (!use_dummy_regs)
		return;
	*((uint64_t *)(nna_regs + VHA_CR_CORE_ASSIGNMENT)) =
		VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_UNALLOCATED |
		VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED;
	*((uint64_t *)(nna_regs + VHA_CR_SOCM_BUF_ASSIGNMENT)) =
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_UNALLOCATED |
		VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_UNALLOCATED;
}
#endif

#ifdef CONFIG_PM
static int vha_plat_suspend(struct device *dev)
{
	return vha_suspend_dev(dev);
}

static int vha_plat_resume(struct device *dev)
{
	return vha_resume_dev(dev);
}

static int vha_plat_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	return 0;
}

static int vha_plat_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	return 0;
}

static int vha_plat_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	return 0;
}
#endif

static int vha_plat_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void *nna_regs = NULL;
	int ret = 0;

	/* dma_mask is required in order for dma_ops mapping to work */
	dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
	if (dev->dma_mask) {
		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
			__func__, dev->dma_mask, *dev->dma_mask);
	} else {
		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
		dev->dma_mask = &dev->coherent_dma_mask;
	}
	/* Give a 128GB (2^37 bytes) fake dma address range,
	 * so that dma_map_page/map_sg does not throw any error
	 * when dealing with high mem address allocations */
	ret = dma_set_mask(dev, DMA_BIT_MASK(37));
	if (ret) {
		dev_err(dev, "%s failed to set dma mask\n", __func__);
		goto out_add_dev;
	}
	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));

	if (dev->platform_data)
		nna_regs = *(uint8_t **)dev->platform_data;

	ret = vha_add_dev(dev, NULL, 0,
			NULL /* plat data */,
			nna_regs /* reg base */,
			nna_regs_size /* reg size */);
	if (ret) {
		dev_err(dev, "vha_add_dev failed\n");
		goto out_add_dev;
	}
out_add_dev:
	return ret;
}

static int vha_plat_remove(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "%s\n", __func__);
	vha_rm_dev(&pdev->dev);
	return 0;
}

static const struct dev_pm_ops vha_pm_plat_ops = {
	SET_RUNTIME_PM_OPS(vha_plat_runtime_suspend,
			vha_plat_runtime_resume, vha_plat_runtime_idle)
	SET_SYSTEM_SLEEP_PM_OPS(vha_plat_suspend, vha_plat_resume)
};
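
/*
 * Runtime PM is a no-op for the dummy device (the callbacks above only
 * log); system suspend/resume is forwarded to the common driver via
 * vha_suspend_dev()/vha_resume_dev().
 */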

static ssize_t info_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "VHA dummy driver version : " VERSION_STRING "\n");
}

static DRIVER_ATTR_RO(info);

static struct attribute *drv_attrs[] = {
	&driver_attr_info.attr,
	NULL
};
ATTRIBUTE_GROUPS(drv);

static struct platform_driver vha_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "vha",
		.groups = drv_groups,
		.pm = &vha_pm_plat_ops,
	},
	.probe = vha_plat_probe,
	.remove = vha_plat_remove,
};
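
/* One platform device per dummy cluster; the array is allocated in
 * vha_plat_init() and released in vha_plat_deinit(). */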
static struct platform_device **pd;

int __exit vha_plat_deinit(void)
{
	int ret;
	int cluster;
	uint8_t *nna_regs;

	platform_driver_unregister(&vha_driver);
	for (cluster = 0; cluster < num_clusters; ++cluster) {
		BUG_ON(pd[cluster] == NULL);
		nna_regs = *(uint8_t **)pd[cluster]->dev.platform_data;
		vfree(nna_regs);
		platform_device_unregister(pd[cluster]);
	}
	use_dummy_regs = false;
	kfree(pd);

	ret = vha_deinit();
	if (ret)
		pr_err("VHA driver deinit failed\n");

	return 0;
}

int __init vha_plat_init(void)
{
	int ret;
	int cluster;

	if (min_alloc_order > max_alloc_order) {
		pr_err("Can't set min_alloc_order > max_alloc_order !\n");
		return -EINVAL;
	}
	WARN_ON(dummy_heap_configs[0].type != IMG_MEM_HEAP_TYPE_UNIFIED);
	dummy_heap_configs[0].options.unified.min_order = min_alloc_order;
	dummy_heap_configs[0].options.unified.max_order = max_alloc_order;

	ret = vha_init_plat_heaps(dummy_heap_configs, num_dummy_heaps);
	if (ret) {
		pr_err("failed to initialize global heaps\n");
		return -ENOMEM;
	}

	if (num_clusters == 0) {
		pr_notice("Overriding num_clusters parameter to 1\n");
		num_clusters = 1;
	}
	pr_notice("%s instantiating %d dummy clusters\n", __func__, num_clusters);

	pd = kcalloc(num_clusters, sizeof(*pd), GFP_KERNEL);
	if (pd == NULL) {
		pr_err("failed to allocate memory!\n");
		return -ENOMEM;
	}

	for (cluster = 0; cluster < num_clusters; ++cluster) {
		void *nna_regs = NULL;

		pr_notice("%s Instantiating dummy vha%d cluster\n", __func__, cluster);
#ifdef _REG_NNA_SIZE
		nna_regs = vmalloc(nna_regs_size);
		if (nna_regs == NULL) {
			pr_warn("Failed allocating dummy NNA reg space. Will not use it...\n");
		} else {
			pr_notice("Successfully allocated dummy NNA reg space.\n");
			memset(nna_regs, 0, nna_regs_size);
			use_dummy_regs = true;
#if defined(CFG_SYS_MAGNA)
			vha_plat_magna_write_defaults(nna_regs);
#endif
		}
#endif
		/* After this call a copy of the nna_regs pointer is stored in
		 * struct device.platform_data; the copied data itself is
		 * managed by the platform device. */
		pd[cluster] = platform_device_register_data(NULL, "vha",
				cluster, &nna_regs, sizeof(nna_regs));
		if (IS_ERR(pd[cluster])) {
			ret = PTR_ERR(pd[cluster]);
			pd[cluster] = NULL;
			pr_err("failed to register platform device!\n");
			/* this cluster's regs were never handed over */
			vfree(nna_regs);
			goto _err;
		}
	}
	ret = platform_driver_register(&vha_driver);
	if (ret) {
		pr_err("failed to register platform driver!\n");
		goto _err;
	}
	return ret;
_err:
	/* Unwind only the devices that were actually registered. */
	for (cluster = 0; cluster < num_clusters; ++cluster) {
		uint8_t *nna_regs;

		if (pd[cluster] == NULL)
			continue;
		nna_regs = *(uint8_t **)pd[cluster]->dev.platform_data;
		vfree(nna_regs);
		platform_device_unregister(pd[cluster]);
	}
	kfree(pd);
	return ret;
}