// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);
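
/*
 * Queue lifecycle, as implemented below: uacce_fops_open() creates a queue
 * in the UACCE_Q_INIT state, UACCE_CMD_START_Q moves it to UACCE_Q_STARTED,
 * and putting it (via UACCE_CMD_PUT_Q, release, or device removal) leaves
 * it in UACCE_Q_ZOMBIE, after which only the final release does anything.
 */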

static int uacce_start_queue(struct uacce_queue *q)
{
	int ret = 0;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			goto out_with_lock;
	}

	q->state = UACCE_Q_STARTED;

out_with_lock:
	mutex_unlock(&uacce_mutex);

	return ret;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce_mutex);

	if (q->state == UACCE_Q_ZOMBIE)
		goto out;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;
out:
	mutex_unlock(&uacce_mutex);

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		return uacce_start_queue(q);

	case UACCE_CMD_PUT_Q:
		return uacce_put_queue(q);

	default:
		if (!uacce->ops->ioctl)
			return -EINVAL;

		return uacce->ops->ioctl(q, cmd, arg);
	}
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				    unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif
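
/*
 * When the device negotiated UACCE_DEV_SVA, uacce_bind_queue() binds the
 * calling process's address space (current->mm) to the parent device and
 * stores the resulting PASID in the queue, letting the hardware operate
 * directly on user virtual addresses; without SVA the bind is a no-op.
 */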
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;

	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret = 0;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;

	mutex_lock(&uacce->queues_lock);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->queues_lock);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;

	mutex_lock(&q->uacce->queues_lock);
	list_del(&q->list);
	mutex_unlock(&q->uacce->queues_lock);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	kfree(q);

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr = NULL;

	if (vma->vm_pgoff < UACCE_MAX_REGION) {
		qfr = q->qfrs[vma->vm_pgoff];
		/*
		 * Clear the stale slot under the same mutex that
		 * uacce_fops_mmap() takes, so the region can be mapped
		 * again instead of hitting the -EEXIST check on an
		 * already-freed pointer.
		 */
		mutex_lock(&uacce_mutex);
		q->qfrs[vma->vm_pgoff] = NULL;
		mutex_unlock(&uacce_mutex);
	}

	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};

static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;

		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&uacce_mutex);

	return ret;

out_with_lock:
	mutex_unlock(&uacce_mutex);
	kfree(qfr);
	return ret;
}

static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;

	poll_wait(file, &q->wait, wait);
	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};
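
/*
 * A minimal user-space flow against this char device; a hedged sketch, not
 * part of this file. The node name /dev/<name>-<id> follows from the
 * dev_set_name() call below, the mmap offset selects a region by its
 * UACCE_QFRT_* index in pages, and "hisi_zip-0" plus mmio_size (readable
 * from the region_mmio_size sysfs attribute) are assumed example values.
 *
 *	int fd = open("/dev/hisi_zip-0", O_RDWR);
 *	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, UACCE_QFRT_MMIO * getpagesize());
 *	ioctl(fd, UACCE_CMD_START_Q);
 */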

#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sprintf(buf, "%d\n",
		       uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

/**
 * uacce_alloc() - allocate an accelerator
 * @parent: pointer to the uacce parent device
 * @interface: pointer to the uacce_interface to register
 *
 * Return: a uacce pointer on success, or an ERR_PTR on failure. The caller
 * must check the negotiated uacce->flags, since requested features such as
 * SVA may have been dropped.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	if (flags & UACCE_DEV_SVA) {
		ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
		if (ret)
			flags &= ~UACCE_DEV_SVA;
	}

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->queues_lock);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	if (flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);
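
/*
 * Illustrative driver-side pairing of uacce_alloc() and uacce_register();
 * a sketch under assumed names: my_ops, the my_* callbacks and "my_accel"
 * are hypothetical, not part of this file. After uacce_alloc() the driver
 * should re-check uacce->flags, since UACCE_DEV_SVA may have been
 * negotiated away above.
 *
 *	static const struct uacce_ops my_ops = {
 *		.get_queue	= my_get_queue,
 *		.put_queue	= my_put_queue,
 *		.start_queue	= my_start_queue,
 *		.mmap		= my_mmap,
 *	};
 *
 *	struct uacce_interface iface = {
 *		.name	= "my_accel",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &my_ops,
 *	};
 *
 *	struct uacce_device *ua = uacce_alloc(dev, &iface);
 *
 *	if (IS_ERR(ua))
 *		return PTR_ERR(ua);
 *	ret = uacce_register(ua);
 */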

/**
 * uacce_register() - add the accelerator as a char device and export it to
 * user space
 * @uacce: the initialized uacce device
 *
 * Return: 0 if registration succeeded, or a negative error code.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);

/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;

	/*
	 * Unmap any remaining user-space mappings, so that user space can
	 * no longer touch the mmapped regions once the parent device has
	 * been removed.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/* ensure no open queue remains */
	mutex_lock(&uacce->queues_lock);
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		uacce_put_queue(q);
		uacce_unbind_queue(q);
	}
	mutex_unlock(&uacce->queues_lock);

	/* disable sva now, since no queue is left open */
	if (uacce->flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");