vha_api.c

/*
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/version.h> /* LINUX_VERSION_CODE / KERNEL_VERSION (assumed not pulled in indirectly) */

#include <uapi/img_mem_man.h>
#include <uapi/version.h>
#include <img_mem_man.h>
#include "vha_common.h"
#include "vha_plat.h"
static uint32_t default_mem_heap = IMG_MEM_MAN_HEAP_ID_INVALID;
module_param(default_mem_heap, uint, 0444);
MODULE_PARM_DESC(default_mem_heap,
	"default heap to use when allocating device memory, \
when 'invalid' -> user requested id will be used.");
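/*
 * Lock and immediately unlock the IRQ spinlock, disabling and re-enabling
 * local interrupts in the process. Used on every return path to user space;
 * see the "Avoid leaving ioctl with interrupts disabled" comments at the
 * call sites.
 */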
#define VHA_IRQ_FENCE() \
	do { \
		spin_lock_irq(&vha->irq_lock); \
		spin_unlock_irq(&vha->irq_lock); \
	} while(0)
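/*
 * Read the next response message queued for this session.
 * Blocks until a response is available unless O_NONBLOCK is set.
 */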
static ssize_t vha_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_session *session = file->private_data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	struct vha_rsp *rsp;
	int ret;

	dev_dbg(miscdev->this_device, "%s: PID: %d, vha: %p, link: %p\n",
			__func__, task_pid_nr(current), vha, session);

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;

	while (list_empty(&session->rsps)) {
		mutex_unlock(&vha->lock);
		if (file->f_flags & O_NONBLOCK) {
			dev_dbg(miscdev->this_device,
					"%s: returning, no block!\n", __func__);
			return -EAGAIN;
		}
		dev_dbg(miscdev->this_device, "%s: going to sleep\n", __func__);
		if (wait_event_interruptible(session->wq,
					!list_empty(&session->rsps))) {
			dev_dbg(miscdev->this_device, "%s: signal\n", __func__);
			return -ERESTARTSYS;
		}
		dev_dbg(miscdev->this_device, "%s: woken up\n", __func__);
		ret = mutex_lock_interruptible(&vha->lock);
		if (ret)
			return -ERESTARTSYS;
	}

	if (list_empty(&session->rsps)) {
		ret = 0;
		goto out_unlock;
	}

	rsp = list_first_entry(&session->rsps, struct vha_rsp, list);
	if (rsp->size > count) {
		dev_warn(miscdev->this_device,
				"WARNING: unexpected read buffer size (%zd/%zd). "
				"Probably user space and kernel space are out of step\n",
				count, rsp->size);
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = copy_to_user(buf, &rsp->user_rsp, rsp->size);
	if (ret) {
		ret = -EFAULT;
		goto out_unlock;
	}

	list_del(&rsp->list);
	mutex_unlock(&vha->lock);
	ret = rsp->size;
#if 0
	print_hex_dump_debug("VHA RSP: ", DUMP_PREFIX_NONE,
			4, 4, (uint32_t *)&rsp->user_rsp,
			ALIGN(rsp->size, 4), false);
#endif
	kfree(rsp);
	return ret;

out_unlock:
	mutex_unlock(&vha->lock);
	return ret;
}
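/*
 * The *_wrapper entry points below optionally arm the kernel fault-injection
 * framework (current->make_it_fail) around the real handler when the
 * corresponding VHA_FI_* bit is set in vha->fault_inject.
 */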
static ssize_t vha_read_wrapper(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	ssize_t ret = 0;
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;

#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_READ)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif
	ret = vha_read(file, buf, count, ppos);
#ifdef CONFIG_FAULT_INJECTION
	if ((vha->fault_inject & VHA_FI_READ) &&
			!(vha->fault_inject & VHA_FI_UM))
		current->make_it_fail = false;
#endif
	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return ret;
}
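/*
 * Poll support: report POLLIN | POLLRDNORM when a response is queued,
 * POLLERR if the session is flagged out-of-memory.
 */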
static unsigned int vha_poll(struct file *file, poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct vha_session *session = file->private_data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	unsigned int mask = 0;
	int ret;

	dev_dbg(miscdev->this_device, "%s: PID: %d, vha: %p, link: %p\n",
			__func__, task_pid_nr(current), vha, session);

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return POLLERR;

	if (req_events & (POLLIN | POLLRDNORM)) {
		/* Register for event */
		poll_wait(file, &session->wq, wait);
		if (session->oom)
			mask = POLLERR;
		if (!list_empty(&session->rsps))
			mask = POLLIN | POLLRDNORM;
		/* if no response item available just return 0 */
	}
	mutex_unlock(&vha->lock);
	return mask;
}

static unsigned int vha_poll_wrapper(struct file *file, poll_table *wait)
{
	unsigned int ret = 0;
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;

#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_READ)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif
	ret = vha_poll(file, wait);
#ifdef CONFIG_FAULT_INJECTION
	if ((vha->fault_inject & VHA_FI_READ) &&
			!(vha->fault_inject & VHA_FI_UM))
		current->make_it_fail = false;
#endif
	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return ret;
}
/* read a message from user, and queue it up to be sent to hw */
static ssize_t vha_write(struct file *file, const char __user *buf,
		size_t size, loff_t *offset)
{
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;
	struct vha_cmd *cmd;

	dev_dbg(miscdev->this_device,
			"%s: PID: %d, vha: %p, session: %p, size: %zu\n",
			__func__, task_pid_nr(current), vha, session, size);

	if (size < sizeof(struct vha_user_cmd)) {
		dev_err(miscdev->this_device, "%s: msg too small\n", __func__);
		return -EINVAL;
	}

	cmd = kzalloc(sizeof(*cmd) - sizeof(cmd->user_cmd) + size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->size = size;
	cmd->session = session;
#ifdef VHA_SCF
	init_completion(&cmd->conf_done);
#endif
	ret = copy_from_user(&cmd->user_cmd, buf, size);
	if (ret) {
		dev_err(miscdev->this_device, "%s: copy failed!\n", __func__);
		ret = -EFAULT;
		goto out_free_item;
	}

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		goto out_free_item;

	ret = vha_add_cmd(session, cmd);
	mutex_unlock(&vha->lock);
	if (ret)
		goto out_free_item;

	return size;

out_free_item:
	kfree(cmd);
	return ret;
}

static ssize_t vha_write_wrapper(struct file *file, const char __user *buf,
		size_t size, loff_t *offset)
{
	ssize_t ret = 0;
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;

#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_WRITE)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif
	ret = vha_write(file, buf, size, offset);
#ifdef CONFIG_FAULT_INJECTION
	if ((vha->fault_inject & VHA_FI_WRITE) &&
			!(vha->fault_inject & VHA_FI_UM))
		current->make_it_fail = false;
#endif
	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return ret;
}
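/*
 * Open a new session: allocate per-file state, create its memory context
 * and register it with the core driver.
 */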
static int vha_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = (struct miscdevice *)file->private_data;
	struct vha_dev *vha = container_of(miscdev, struct vha_dev, miscdev);
	struct vha_session *session;
	int ret;
	uint8_t pri;

	dev_dbg(miscdev->this_device, "%s: PID: %d, vha: %p\n",
			__func__, task_pid_nr(current), vha);

	session = devm_kzalloc(miscdev->this_device, sizeof(struct vha_session),
			GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->vha = vha;

	/* memory context for all buffers used by this session */
	ret = img_mem_create_proc_ctx(&session->mem_ctx);
	if (ret) {
		dev_err(miscdev->this_device, "%s: failed to create context!\n",
				__func__);
		devm_kfree(miscdev->this_device, session);
		return ret;
	}

	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++)
		INIT_LIST_HEAD(&session->cmds[pri]);
	INIT_LIST_HEAD(&session->rsps);
	INIT_LIST_HEAD(&session->bufs);
	init_waitqueue_head(&session->wq);
	file->private_data = session;

	ret = vha_add_session(session);
	if (ret) {
		img_mem_destroy_proc_ctx(session->mem_ctx);
		devm_kfree(miscdev->this_device, session);
		file->private_data = NULL;
	}

	return ret;
}

static int vha_open_wrapper(struct inode *inode, struct file *file)
{
	int ret = 0;
	struct miscdevice *miscdev = (struct miscdevice *)file->private_data;
	struct vha_dev *vha = container_of(miscdev, struct vha_dev, miscdev);

#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_OPEN)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif
	ret = vha_open(inode, file);
#ifdef CONFIG_FAULT_INJECTION
	if ((vha->fault_inject & VHA_FI_OPEN) &&
			!(vha->fault_inject & VHA_FI_UM))
		current->make_it_fail = false;
#endif
	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return ret;
}
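/* Tear down the session created in vha_open(). */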
static int vha_release(struct inode *inode, struct file *file)
{
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	dev_dbg(miscdev->this_device, "%s: PID: %d, vha: %p, session: %p\n",
			__func__, task_pid_nr(current), vha, session);

	vha_rm_session(session);
	img_mem_destroy_proc_ctx(session->mem_ctx);
	devm_kfree(miscdev->this_device, session);
	file->private_data = NULL;

	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return 0;
}
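/*
 * Per-ioctl handlers. Each one copies its argument structure from/to
 * user space and calls into the core driver, taking vha->lock where needed.
 */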
static long vha_ioctl_get_hw_props(struct vha_session *session,
		void __user *buf)
{
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	dev_dbg(miscdev->this_device, "%s: session %p\n", __func__, session);

	if (copy_to_user(buf, &vha->hw_props,
			sizeof(struct vha_hw_props))) {
		dev_err(miscdev->this_device, "%s: copy to user failed!\n",
				__func__);
		return -EFAULT;
	}

	return 0;
}

static long vha_ioctl_query_heaps(struct vha_session *session, void __user *buf)
{
	struct vha_heaps_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;
	int i = 0;
	struct list_head *pos;

	dev_dbg(miscdev->this_device, "%s: session %u\n",
			__func__, session->id);

	memset(&data, 0, sizeof(data));
	list_for_each(pos, &vha->heaps) {
		struct vha_heap *heap = list_entry(pos, struct vha_heap, list);
		uint8_t type;
		uint32_t attrs;
		struct vha_heap_data *info;

		ret = img_mem_get_heap_info(heap->id, &type, &attrs);
		BUG_ON(ret != 0);

		info = &data.heaps[i++];
		info->id = heap->id;
		info->type = type;
		info->attributes = attrs;
	}

	if (copy_to_user(buf, &data, sizeof(data)))
		return -EFAULT;

	return 0;
}
static long vha_ioctl_alloc(struct vha_session *session, void __user *buf)
{
	struct vha_alloc_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;

	if (copy_from_user(&data, buf, sizeof(data)))
		return -EFAULT;

	dev_dbg(miscdev->this_device, "%s: session %u, size %llu, heap_id %u\n",
			__func__, session->id, data.size, data.heap_id);

	if (default_mem_heap != IMG_MEM_MAN_HEAP_ID_INVALID)
		data.heap_id = default_mem_heap;

	if (list_empty(&session->bufs))
		img_pdump_printf("-- ALLOC_BEGIN\n");

	ret = img_mem_alloc(session->vha->dev,
			session->mem_ctx, data.heap_id,
			(size_t)data.size, data.attributes, &data.buf_id);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret) {
		img_mem_free(session->mem_ctx, data.buf_id);
		return ret;
	}

	ret = vha_add_buf(session, data.buf_id, (size_t)data.size,
			data.name, data.attributes);
	if (ret)
		goto out_free;

	if (copy_to_user(buf, &data, sizeof(struct vha_alloc_data)))
		goto out_rm_buf;

	mutex_unlock(&vha->lock);
	return 0;

out_rm_buf:
	vha_rm_buf(session, data.buf_id);
out_free:
	img_mem_free(session->mem_ctx, data.buf_id);
	mutex_unlock(&vha->lock);
	return -EFAULT;
}

static long vha_ioctl_import(struct vha_session *session, void __user *buf)
{
	struct vha_import_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;

	if (copy_from_user(&data, buf, sizeof(data)))
		return -EFAULT;

	dev_dbg(miscdev->this_device, "%s: session %u, buf_hnd 0x%016llx, size %llu, heap_id %u\n",
			__func__, session->id, data.buf_hnd, data.size, data.heap_id);

	ret = img_mem_import(session->vha->dev, session->mem_ctx, data.heap_id,
			(size_t)data.size, data.attributes, data.buf_hnd,
			&data.buf_id);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret) {
		img_mem_free(session->mem_ctx, data.buf_id);
		return ret;
	}

	ret = vha_add_buf(session, data.buf_id, (size_t)data.size,
			data.name, data.attributes);
	if (ret)
		goto out_free;

	if (copy_to_user(buf, &data, sizeof(struct vha_import_data)))
		goto out_rm_buf;

	mutex_unlock(&vha->lock);
	return 0;

out_rm_buf:
	vha_rm_buf(session, data.buf_id);
out_free:
	img_mem_free(session->mem_ctx, data.buf_id);
	mutex_unlock(&vha->lock);
	return -EFAULT;
}
static long vha_ioctl_export(struct vha_session *session, void __user *buf)
{
	struct vha_export_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;

	if (copy_from_user(&data, buf, sizeof(data)))
		return -EFAULT;

	dev_dbg(miscdev->this_device, "%s: session %u, buf_id %u, size %llu\n",
			__func__, session->id, data.buf_id, data.size);

	ret = img_mem_export(session->vha->dev, session->mem_ctx, data.buf_id,
			(size_t)data.size, data.attributes, &data.buf_hnd);
	if (ret)
		return ret;

	if (copy_to_user(buf, &data, sizeof(struct vha_export_data)))
		return -EFAULT;

	return 0;
}

static long vha_ioctl_free(struct vha_session *session, void __user *buf)
{
	struct vha_free_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device,
				"%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

	dev_dbg(miscdev->this_device, "%s: session %u, buf_id %u\n",
			__func__, session->id, data.buf_id);

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;

	if (!session->freeing) {
		session->freeing = true;
		img_pdump_printf("-- FREE_BEGIN\n");
	}

	vha_rm_buf(session, data.buf_id);
	img_mem_free(session->mem_ctx, data.buf_id);
	mutex_unlock(&vha->lock);
	return 0;
}

static long vha_ioctl_map_to_onchip(struct vha_session *session, void __user *buf)
{
	struct vha_map_to_onchip_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret = 0;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device,
				"%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

	dev_dbg(miscdev->this_device, "%s: session %u, virt_addr 0x%016llx, buf_id %u\n",
			__func__, session->id, data.virt_addr, data.buf_id);

	ret = vha_map_to_onchip(session, data.buf_id, data.virt_addr, data.page_size,
			data.num_pages, data.page_idxs, &data.map_id);

	if (copy_to_user(buf, &data, sizeof(data))) {
		dev_err(miscdev->this_device, "%s: copy to user failed!\n",
				__func__);
		return -EFAULT;
	}

	return ret;
}
static long vha_ioctl_map(struct vha_session *session, void __user *buf)
{
	struct vha_map_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device,
				"%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

	dev_dbg(miscdev->this_device, "%s: session %u, virt_addr 0x%016llx, buf_id %u, flags 0x%08x\n",
			__func__, session->id, data.virt_addr, data.buf_id, data.flags);

	return vha_map_buffer(session, data.buf_id,
			data.virt_addr, data.flags);
}

static long vha_ioctl_unmap(struct vha_session *session, void __user *buf)
{
	struct vha_unmap_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device,
				"%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

	if (!session->freeing) {
		session->freeing = true;
		img_pdump_printf("-- FREE_BEGIN\n");
	}

	dev_dbg(miscdev->this_device, "%s: session %u, buf_id %u\n",
			__func__, session->id, data.buf_id);

	return vha_unmap_buffer(session, data.buf_id);
}

static long vha_ioctl_buf_status(struct vha_session *session, void __user *buf)
{
	struct vha_buf_status_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device,
				"%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

	dev_dbg(miscdev->this_device, "%s: session %u, buf_id %u, status %u, in_sync_fd %d, out_sync_sig %d\n",
			__func__, session->id, data.buf_id, data.status, data.in_sync_fd, data.out_sync_sig);

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;

	ret = vha_set_buf_status(session, data.buf_id, data.status,
			data.in_sync_fd, data.out_sync_sig);
	mutex_unlock(&vha->lock);
	return ret;
}
static long vha_ioctl_sync(struct vha_session *session, void __user *buf)
{
	struct vha_sync_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int ret = -EINVAL;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device, "%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

#ifdef KERNEL_DMA_FENCE_SUPPORT
	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;

	switch (data.op) {
	case VHA_SYNC_OP_CREATE_OUT:
		dev_dbg(miscdev->this_device, "%s: session %u, VHA_SYNC_OP_CREATE_OUT buf_id_count: %u\n",
				__func__, session->id, data.create_data.buf_id_count);
		if (data.create_data.buf_id_count > VHA_SYNC_MAX_BUF_IDS) {
			dev_err(miscdev->this_device, "%s: too many buf_ids provided\n",
					__func__);
			ret = -EINVAL;
		} else
			ret = vha_create_output_sync(session, data.create_data.buf_id_count,
					data.create_data.buf_ids);
		break;
	case VHA_SYNC_OP_MERGE_IN:
		dev_dbg(miscdev->this_device, "%s: session %u, VHA_SYNC_OP_MERGE_IN in_sync_fd_count: %u\n",
				__func__, session->id, data.merge_data.in_sync_fd_count);
		if (data.merge_data.in_sync_fd_count > VHA_SYNC_MAX_IN_SYNC_FDS) {
			dev_err(miscdev->this_device, "%s: too many in_sync_fds provided\n",
					__func__);
			ret = -EINVAL;
		} else
			ret = vha_merge_input_syncs(session, data.merge_data.in_sync_fd_count,
					data.merge_data.in_sync_fds);
		break;
	case VHA_SYNC_OP_RELEASE:
		dev_dbg(miscdev->this_device, "%s: session %u, VHA_SYNC_OP_RELEASE buf_id_count: %u\n",
				__func__, session->id, data.release_data.buf_id_count);
		if (data.release_data.buf_id_count > VHA_SYNC_MAX_BUF_IDS) {
			dev_err(miscdev->this_device, "%s: too many buf_ids provided\n",
					__func__);
			ret = -EINVAL;
		} else
			ret = vha_release_syncs(session, data.release_data.buf_id_count,
					data.release_data.buf_ids);
		break;
	default:
		break;
	}
	mutex_unlock(&vha->lock);

	if (ret < 0)
		data.sync_fd = VHA_SYNC_NONE;
	else {
		data.sync_fd = ret;
		ret = 0;
	}
#else
	data.sync_fd = VHA_SYNC_NONE;
	ret = -ENOSYS;
	dev_warn(miscdev->this_device, "%s: dma_fences not supported!\n", __func__);
#endif

	if (copy_to_user(buf, &data, sizeof(data))) {
		dev_err(miscdev->this_device, "%s: copy to user failed!\n", __func__);
		return -EFAULT;
	}

	return ret;
}
static long vha_ioctl_cancel(struct vha_session *session, void __user *buf)
{
	struct vha_cancel_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	if (copy_from_user(&data, buf, sizeof(data))) {
		dev_err(miscdev->this_device,
				"%s: copy_from_user error\n", __func__);
		return -EFAULT;
	}

	dev_dbg(miscdev->this_device, "%s: session %u, cmd_id 0x%08x, cmd_id_mask 0x%08x\n",
			__func__, session->id, data.cmd_id, data.cmd_id_mask);

	return vha_rm_cmds(session, data.cmd_id, data.cmd_id_mask, data.respond);
}

static long vha_ioctl_version(struct vha_session *session,
		void __user *buf)
{
	struct vha_version_data data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	memset(&data, 0, sizeof(struct vha_version_data));
	memcpy(data.digest, KERNEL_INTERFACE_DIGEST, sizeof(data.digest)-1);

	dev_dbg(miscdev->this_device, "%s: session %p: interface digest:%s\n", __func__,
			session, data.digest);

	if (copy_to_user(buf, &data,
			sizeof(struct vha_version_data))) {
		dev_err(miscdev->this_device, "%s: copy to user failed!\n",
				__func__);
		return -EFAULT;
	}

	return 0;
}
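/* Dispatch a VHA_IOC_* request to its handler. */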
static long vha_ioctl(struct file *file, unsigned int code, unsigned long value)
{
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;

	dev_dbg(miscdev->this_device, "%s: code: 0x%x, value: 0x%lx\n",
			__func__, code, value);

	switch (code) {
	case VHA_IOC_HW_PROPS:
		return vha_ioctl_get_hw_props(session, (void __user *)value);
	case VHA_IOC_QUERY_HEAPS:
		return vha_ioctl_query_heaps(session, (void __user *)value);
	case VHA_IOC_ALLOC:
		return vha_ioctl_alloc(session, (void __user *)value);
	case VHA_IOC_IMPORT:
		return vha_ioctl_import(session, (void __user *)value);
	case VHA_IOC_EXPORT:
		return vha_ioctl_export(session, (void __user *)value);
	case VHA_IOC_FREE:
		return vha_ioctl_free(session, (void __user *)value);
	case VHA_IOC_VHA_MAP_TO_ONCHIP:
		return vha_ioctl_map_to_onchip(session, (void __user *)value);
	case VHA_IOC_VHA_MAP:
		return vha_ioctl_map(session, (void __user *)value);
	case VHA_IOC_VHA_UNMAP:
		return vha_ioctl_unmap(session, (void __user *)value);
	case VHA_IOC_BUF_STATUS:
		return vha_ioctl_buf_status(session, (void __user *)value);
	case VHA_IOC_SYNC:
		return vha_ioctl_sync(session, (void __user *)value);
	case VHA_IOC_CANCEL:
		return vha_ioctl_cancel(session, (void __user *)value);
	case VHA_IOC_VERSION:
		return vha_ioctl_version(session, (void __user *)value);
	default:
		dev_err(miscdev->this_device, "%s: code %#x unknown\n",
				__func__, code);
		return -EINVAL;
	}
}
static long vha_ioctl_wrapper(struct file *file, unsigned int code, unsigned long value)
{
	long ret = 0;
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;

#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_IOCTL)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif
	ret = vha_ioctl(file, code, value);
#ifdef CONFIG_FAULT_INJECTION
	if ((vha->fault_inject & VHA_FI_IOCTL) &&
			!(vha->fault_inject & VHA_FI_UM))
		current->make_it_fail = false;
#endif
	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return ret;
}

static int vha_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;
	struct miscdevice *miscdev = &vha->miscdev;
	int buf_id = vma->vm_pgoff;

	dev_dbg(miscdev->this_device,
			"%s: PID: %d start %#lx end %#lx\n",
			__func__, task_pid_nr(current),
			vma->vm_start, vma->vm_end);
	dev_dbg(miscdev->this_device, "%s: PID: %d buf_id %d\n",
			__func__, task_pid_nr(current), buf_id);

	return img_mem_map_um(session->mem_ctx, buf_id, vma);
}

static int vha_mmap_wrapper(struct file *file, struct vm_area_struct *vma)
{
	int ret = 0;
	struct vha_session *session = (struct vha_session *)file->private_data;
	struct vha_dev *vha = session->vha;

#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_MMAP)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif
	ret = vha_mmap(file, vma);
#ifdef CONFIG_FAULT_INJECTION
	if ((vha->fault_inject & VHA_FI_MMAP) &&
			!(vha->fault_inject & VHA_FI_UM))
		current->make_it_fail = false;
#endif
	/* Avoid leaving ioctl with interrupts disabled. */
	VHA_IRQ_FENCE();
	return ret;
}
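/*
 * File operations table registered with the misc device; the *_wrapper
 * variants are installed so the fault-injection hooks above apply to the
 * user-space entry points.
 */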
static const struct file_operations vha_fops = {
	.owner          = THIS_MODULE,
	.read           = vha_read_wrapper,
	.poll           = vha_poll_wrapper,
	.write          = vha_write_wrapper,
	.open           = vha_open_wrapper,
	.mmap           = vha_mmap_wrapper,
	.unlocked_ioctl = vha_ioctl_wrapper,
	.compat_ioctl   = vha_ioctl_wrapper,
	.release        = vha_release,
};
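/* Register the /dev/vha<id> misc device node for this core. */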
#define VHA_MAX_NODE_NAME 16

int vha_api_add_dev(struct device *dev, struct vha_dev *vha, unsigned int id)
{
	int ret;
	char *dev_name = NULL;

	if (!dev || !vha) {
		pr_err("%s: invalid params!\n", __func__);
		return -EINVAL;
	}

	dev_name = devm_kzalloc(dev, VHA_MAX_NODE_NAME, GFP_KERNEL);
	if (!dev_name)
		return -ENOMEM;

	snprintf(dev_name, VHA_MAX_NODE_NAME, "vha%d", id);
	dev_dbg(dev, "%s: trying to register misc dev %s...\n",
			__func__, dev_name);

	vha->miscdev.minor = MISC_DYNAMIC_MINOR;
	vha->miscdev.fops = &vha_fops;
	vha->miscdev.name = dev_name;
	vha->id = id;

	ret = misc_register(&vha->miscdev);
	if (ret) {
		dev_err(dev, "%s: failed to register VHA misc device\n",
				__func__);
		goto out_register;
	}

	dev_dbg(dev, "%s: misc dev registered successfully\n", __func__);
	return 0;

out_register:
	devm_kfree(dev, dev_name);
	return ret;
}
int vha_api_rm_dev(struct device *dev, struct vha_dev *vha)
{
	int ret = 0;

	if (!dev || !vha) {
		pr_err("%s: invalid params!\n", __func__);
		return -EINVAL;
	}

	dev_dbg(dev, "%s: trying to deregister VHA misc device\n", __func__);
	/* note: since linux v4.3, misc_deregister does not return errors */
	misc_deregister(&vha->miscdev);

	devm_kfree(dev, (void *)vha->miscdev.name);

	dev_dbg(dev, "%s: VHA misc dev deregistered: %d\n", __func__, ret);
	return ret;
}
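/* Module load/unload: common driver init first, then the platform layer. */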
static int __init vha_api_init(void)
{
	int ret;

	pr_debug("loading VHA module.\n");

	ret = vha_early_init();
	if (ret)
		pr_err("failed to initialize VHA driver\n");
	else {
		ret = vha_plat_init();
		if (ret)
			pr_err("failed to initialize VHA driver\n");
	}

	return ret;
}
static void __exit vha_api_exit(void)
{
	int ret;

	pr_debug("unloading VHA module.\n");

	ret = vha_plat_deinit();
	if (ret)
		pr_err("failed to deinitialise VHA driver\n");
}

module_init(vha_api_init);
module_exit(vha_api_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Imagination");

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
MODULE_IMPORT_NS(IMG_MEM);
#endif