vdmabuf.c

  1. // SPDX-License-Identifier: (MIT OR GPL-2.0)
  2. #include <linux/kernel.h>
  3. #include <linux/errno.h>
  4. #include <linux/init.h>
  5. #include <linux/module.h>
  6. #include <linux/mutex.h>
  7. #include <linux/miscdevice.h>
  8. #include <linux/workqueue.h>
  9. #include <linux/slab.h>
  10. #include <linux/device.h>
  11. #include <linux/hashtable.h>
  12. #include <linux/uaccess.h>
  13. #include <linux/poll.h>
  14. #include <linux/dma-buf.h>
  15. #include <linux/vhost.h>
  16. #include <linux/vfio.h>
  17. #include <linux/virtio_vdmabuf.h>
  18. #include <linux/of_address.h>
  19. #include <linux/of_fdt.h>
  20. #include <linux/of.h>
  21. enum {
  22. VHOST_VDMABUF_FEATURES = 1ULL << VIRTIO_F_VERSION_1,
  23. };
  24. static struct virtio_vdmabuf_info *drv_info;
  25. static unsigned int vhost_vmid; /* currently we only support one guest */
  26. /*
  27. * Example carveout_buf configuration in dts:
  28. *
  29. * vdmabuf_reserved_memory {
  30. * reg = <0x0 0x82000000 0x0 0x4000
  31. * 0x0 0x82004000 0x0 0x4000
  32. * 0x0 0x82008000 0x0 0x4000>;
  33. * reg-names = "vi", "vo", "enc";
  34. * };
  35. */
  36. static struct carveout_buf carveout_bufs[VIRTIO_VDMABUF_CARVEOUTS_NUM] = { 0 };
  37. static char carveout_names[VIRTIO_VDMABUF_CARVEOUTS_NUM][VIRTIO_VDMABUF_CARVEOUT_NAME_LEN] =
  38. VIRTIO_VDMABUF_CARVEOUT_NAMES;
  39. static inline void vhost_vdmabuf_add(struct vhost_vdmabuf *new)
  40. {
  41. list_add_tail(&new->list, &drv_info->head_vdmabuf_list);
  42. }
  43. static inline struct vhost_vdmabuf *vhost_vdmabuf_find(unsigned int vmid)
  44. {
  45. struct vhost_vdmabuf *found;
  46. list_for_each_entry(found, &drv_info->head_vdmabuf_list, list)
  47. if (found->vmid == vmid)
  48. return found;
  49. return NULL;
  50. }
  51. static inline bool vhost_vdmabuf_del(struct vhost_vdmabuf *vdmabuf)
  52. {
  53. struct vhost_vdmabuf *iter, *temp;
  54. list_for_each_entry_safe(iter, temp,
  55. &drv_info->head_vdmabuf_list,
  56. list)
  57. if (iter == vdmabuf) {
  58. list_del(&iter->list);
  59. return true;
  60. }
  61. return false;
  62. }
  63. static inline void vhost_vdmabuf_del_all(void)
  64. {
  65. struct vhost_vdmabuf *iter, *temp;
  66. list_for_each_entry_safe(iter, temp,
  67. &drv_info->head_vdmabuf_list,
  68. list) {
  69. list_del(&iter->list);
  70. kfree(iter);
  71. }
  72. }
  73. static unsigned int get_buf_id(void)
  74. {
  75. static int buf_id = 0;
  76. buf_id = buf_id < VIRTIO_VDMABUF_MAX_ID ? buf_id + 1 : 0;
  77. return buf_id;
  78. }
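/*
 * Parse the optional vdmabuf_reserved_memory node (see the dts demo
 * above): each region whose entry in "reg-names" matches one of
 * carveout_names[] is recorded in carveout_bufs[] and marked ready.
 * Returns a negative value if the node is missing or a name lookup fails.
 */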
  79. static int carveout_buf_setup(void)
  80. {
  81. struct device_node *node;
  82. struct resource res;
  83. int i, index;
  84. int ret;
  85. node = of_find_node_by_name(NULL, "vdmabuf_reserved_memory");
  86. if (!node) {
  87. ret = -EINVAL;
  88. dev_err(drv_info->dev, "failed to find vdmabuf_reserved_memory node\n");
  89. return ret;
  90. }
  91. for (i = 0; i < VIRTIO_VDMABUF_CARVEOUTS_NUM; i++) {
  92. index = of_property_match_string(node, "reg-names",
  93. carveout_names[i]);
  94. if (index < 0)
  95. goto exit;
  96. if (of_address_to_resource(node, index, &res))
  97. goto exit;
  98. carveout_bufs[i].addr = res.start;
  99. carveout_bufs[i].size = resource_size(&res);
  100. carveout_bufs[i].ready = true;
  101. }
  102. exit:
  103. of_node_put(node);
  104. return index;
  105. }
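/*
 * Build the scatter list describing exp_buf. For the page-backed heaps a
 * real sg_table is allocated with one entry per backing page; for the
 * carveout heap a struct carveout_buf is returned disguised as an
 * sg_table pointer, which is why put_sg_table() and sg_table_map()
 * special-case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT.
 */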
  106. static struct sg_table *get_sg_table(struct virtio_vdmabuf_buf *exp_buf)
  107. {
  108. int heap_type = exp_buf->heap_type;
  109. struct carveout_buf *carveout_sg;
  110. struct sg_table *sgt;
  111. struct scatterlist *sgl;
  112. int i, ret;
  113. switch (heap_type) {
  114. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  115. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  116. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  117. /* SYSTEM and SYSTEM_CONTIG share the same logic */
  118. sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
  119. if (!sgt)
  120. return ERR_PTR(-ENOMEM);
  121. ret = sg_alloc_table(sgt, exp_buf->bp_num, GFP_KERNEL);
  122. if (ret) {
  123. kfree(sgt);
  124. return ERR_PTR(-ENOMEM);
  125. }
  126. sgl = sgt->sgl;
  127. for (i = 0; i < exp_buf->bp_num; i++) {
  128. sg_set_page(sgl, exp_buf->bp[i].page,
  129. exp_buf->bp[i].size, 0);
  130. sgl = sg_next(sgl);
  131. }
  132. break;
  133. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  134. carveout_sg = kzalloc(sizeof(struct carveout_buf), GFP_KERNEL);
  135. if (!carveout_sg)
  136. return ERR_PTR(-ENOMEM);
  137. carveout_sg->addr = exp_buf->bp[0].addr;
  138. carveout_sg->size = exp_buf->bp[0].size;
  139. sgt = (struct sg_table *)carveout_sg;
  140. break;
  141. default:
  142. return ERR_PTR(-EINVAL);
  143. }
  144. return sgt;
  145. }
  146. static void put_sg_table(struct virtio_vdmabuf_buf *buf,
  147. struct sg_table *sgt)
  148. {
  149. int heap_type = buf->heap_type;
  150. switch (heap_type) {
  151. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  152. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  153. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  154. sg_free_table(sgt);
  155. kfree(sgt);
  156. break;
  157. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  158. kfree(sgt);
  159. break;
  160. default:
  161. break;
  162. }
  163. }
  164. static int sg_table_map(struct device *dev, struct virtio_vdmabuf_buf *exp_buf,
  165. struct sg_table *sgt, enum dma_data_direction dir)
  166. {
  167. int heap_type = exp_buf->heap_type;
  168. struct carveout_buf *carveout_sg;
  169. dma_addr_t dma_handle;
  170. switch (heap_type) {
  171. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  172. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  173. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  174. /* SYSTEM and SYSTEM_CONTIG share the same logic */
  175. if (dma_map_sgtable(dev, sgt, dir, 0)) {
  176. dev_err(dev, "[%s:%d] error\n",
  177. __func__, __LINE__);
  178. sg_free_table(sgt);
  179. kfree(sgt);
  180. return -EINVAL;
  181. }
  182. break;
  183. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  184. carveout_sg = (struct carveout_buf *)sgt;
  185. dma_handle = dma_map_single(dev, (void *)carveout_sg->addr,
  186. carveout_sg->size, dir);
  187. if (dma_mapping_error(dev, dma_handle)) {
  188. dev_err(dev, "[%s:%d] error\n",
  189. __func__, __LINE__);
  190. kfree(carveout_sg);
  191. return -EINVAL;
  192. }
  193. break;
  194. default:
  195. return -EINVAL;
  196. }
  197. return 0;
  198. }
  199. static int sg_table_unmap(struct device *dev, struct virtio_vdmabuf_buf *exp_buf,
  200. struct sg_table *sgt, enum dma_data_direction dir)
  201. {
  202. int heap_type = exp_buf->heap_type;
  203. switch (heap_type) {
  204. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  205. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  206. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  207. dma_unmap_sgtable(dev, sgt, dir, 0);
  208. break;
  209. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  210. dma_unmap_single(dev, exp_buf->bp[0].addr,
  211. exp_buf->bp[0].size, dir);
  212. break;
  213. default:
  214. return -EINVAL;
  215. }
  216. return 0;
  217. }
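/*
 * dma-buf callbacks: vhost_vdmabuf_dmabuf_attach() pre-builds the table
 * into attachment->priv (a virtio_vdmabuf_attachment holding the device
 * and sgt), so the map/unmap callbacks below only perform the actual DMA
 * mapping for the requested direction.
 */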
  218. static struct sg_table *vhost_vdmabuf_dmabuf_map(struct dma_buf_attachment *attachment,
  219. enum dma_data_direction dir)
  220. {
  221. struct virtio_vdmabuf_buf *exp_buf = attachment->dmabuf->priv;
  222. struct virtio_vdmabuf_attachment *a = attachment->priv;
  223. struct sg_table *sgt = a->sgt;
  224. int ret;
  225. ret = sg_table_map(a->dev, exp_buf, sgt, dir);
  226. if (ret)
  227. return ERR_PTR(ret);
  228. return sgt;
  229. }
  230. static void vhost_vdmabuf_dmabuf_unmap(struct dma_buf_attachment *attachment,
  231. struct sg_table *sgt,
  232. enum dma_data_direction dir)
  233. {
  234. struct virtio_vdmabuf_buf *exp_buf = attachment->dmabuf->priv;
  235. sg_table_unmap(attachment->dev, exp_buf, sgt, dir);
  236. }
  237. static int vhost_vdmabuf_dmabuf_mmap(struct dma_buf *dmabuf,
  238. struct vm_area_struct *vma)
  239. {
  240. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  241. unsigned long start, pfn;
  242. int size;
  243. int i, ret = 0;
  244. if (!exp_buf)
  245. return -EINVAL;
  246. if (vma->vm_end - vma->vm_start > exp_buf->size) {
  247. dev_warn(drv_info->dev,
  248. "vm_end[%lu] - vm_start[%lu] [%lu] > mem size[%ld]\n",
  249. vma->vm_end, vma->vm_start,
  250. vma->vm_end - vma->vm_start,
  251. exp_buf->size);
  252. return -EINVAL;
  253. }
  254. if (exp_buf->flags & VIRTIO_VDAMBUF_NONCACHED)
  255. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  256. start = vma->vm_start;
  257. switch (exp_buf->heap_type) {
  258. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  259. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  260. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  261. /* SYSTEM and SYSTEM_CONTIG share the same logic */
  262. for (i = 0; i < exp_buf->bp_num; i++) {
  263. pfn = page_to_pfn(exp_buf->bp[i].page);
  264. size = exp_buf->bp[i].size;
  265. ret = remap_pfn_range(vma, start, pfn, size,
  266. vma->vm_page_prot);
  267. if (ret)
  268. return ret;
  269. start += size;
  270. }
  271. break;
  272. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  273. ret = vm_iomap_memory(vma, exp_buf->bp[0].addr,
  274. exp_buf->bp[0].size);
  275. break;
  276. default:
  277. break;
  278. }
  279. return ret;
  280. }
  281. static void *vhost_vdmabuf_dmabuf_vmap(struct dma_buf *dmabuf)
  282. {
  283. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  284. int heap_type = exp_buf->heap_type;
  285. struct page **pages;
  286. unsigned long pfn;
  287. void *addr;
  288. int i, nr_pages;
  289. switch (heap_type) {
  290. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  291. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  292. nr_pages = exp_buf->bp_num;
  293. pages = kzalloc(nr_pages * sizeof(struct page *),
  294. GFP_KERNEL);
  295. if (!pages)
  296. return ERR_PTR(-ENOMEM);
  297. for (i = 0; i < exp_buf->bp_num; i++)
  298. pages[i] = exp_buf->bp[i].page;
  299. addr = vm_map_ram(pages, exp_buf->bp_num, 0); /* or vmap */
  300. kfree(pages);
  301. return addr;
  302. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  303. nr_pages = exp_buf->size / PAGE_SIZE;
  304. pages = kzalloc(nr_pages * sizeof(struct page *),
  305. GFP_KERNEL);
  306. if (!pages)
  307. return ERR_PTR(-ENOMEM);
  308. /* convert the head page of config memory to pfn */
  309. pfn = page_to_pfn(exp_buf->bp[0].page);
  310. for (i = 0; i < nr_pages; i++)
  311. pages[i] = pfn_to_page(pfn + i);
  312. addr = vm_map_ram(pages, nr_pages, 0); /* or vmap */
  313. kfree(pages);
  314. return addr;
  315. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  316. return ioremap(exp_buf->bp[0].addr,
  317. exp_buf->bp[0].size);
  318. default:
  319. return ERR_PTR(-EINVAL);
  320. }
  321. return NULL;
  322. }
  323. static void vhost_vdmabuf_dmabuf_vunmap(struct dma_buf *dmabuf, void *vaddr)
  324. {
  325. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  326. int heap_type = exp_buf->heap_type;
  327. switch (heap_type) {
  328. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  329. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  330. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  331. vm_unmap_ram(vaddr, exp_buf->size / PAGE_SIZE);
  332. break;
  333. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  334. iounmap(vaddr);
  335. break;
  336. default:
  337. break;
  338. }
  339. }
  340. static void vhost_vdmabuf_release_priv(struct virtio_vdmabuf_buf *exp_buf)
  341. {
  342. int i;
  343. switch (exp_buf->heap_type) {
  344. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  345. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  346. for (i = 0; i < exp_buf->bp_num; i++)
  347. put_page(exp_buf->bp[i].page);
  348. break;
  349. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  350. __free_pages(exp_buf->bp[0].page,
  351. get_order(exp_buf->bp[0].size));
  352. break;
  353. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  354. /* no need to release */
  355. break;
  356. default:
  357. break;
  358. }
  359. }
  360. static int send_msg_to_guest(struct vhost_vdmabuf *vdmabuf,
  361. enum virtio_vdmabuf_cmd cmd, void *data,
  362. int bp_num);
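/*
 * Two release paths: a mirrored (imported) buffer only triggers a
 * VIRTIO_VDMABUF_CMD_REL_NOTIFY message so the guest can drop its
 * reference, while a locally created buffer is removed from the local
 * hash table and its backing pages are freed via
 * vhost_vdmabuf_release_priv().
 */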
  363. static void vhost_vdmabuf_dmabuf_release(struct dma_buf *dma_buf)
  364. {
  365. struct virtio_vdmabuf_buf *exp_buf = dma_buf->priv;
  366. struct vhost_vdmabuf *vdmabuf;
  367. unsigned long irqflags;
  368. unsigned int buf_id;
  369. int ret;
  370. if (!exp_buf)
  371. return;
  372. exp_buf->dma_buf = NULL;
  373. exp_buf->valid = false;
  374. buf_id = exp_buf->buf_id;
  375. vdmabuf = exp_buf->host;
  376. if (exp_buf->imported) {
  377. ret = send_msg_to_guest(vdmabuf, VIRTIO_VDMABUF_CMD_REL_NOTIFY,
  378. &buf_id, 0);
  379. if (ret < 0)
  380. dev_err(drv_info->dev,
  381. "failed(%d) to send dmabuf(%d) release cmd\n",
  382. ret, buf_id);
  383. } else {
  384. spin_lock_irqsave(&vdmabuf->local_lock, irqflags);
  385. ret = vhost_vdmabuf_del_buf(vdmabuf, buf_id, true);
  386. spin_unlock_irqrestore(&vdmabuf->local_lock, irqflags);
  387. if (ret)
  388. dev_err(drv_info->dev,
  389. "failed(%d) to del dmabuf(%d) from local list\n",
  390. ret, buf_id);
  391. vhost_vdmabuf_release_priv(exp_buf);
  392. }
  393. kfree(exp_buf);
  394. }
  395. static int vhost_vdmabuf_dmabuf_begin_cpu_access(struct dma_buf *dmabuf,
  396. enum dma_data_direction dir)
  397. {
  398. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  399. struct virtio_vdmabuf_attachment *a;
  400. int heap_type = exp_buf->heap_type;
  401. struct carveout_buf *c;
  402. mutex_lock(&exp_buf->lock);
  403. switch (heap_type) {
  404. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  405. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  406. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  407. list_for_each_entry(a, &exp_buf->attachments, list)
  408. dma_sync_sgtable_for_cpu(a->dev, a->sgt, dir);
  409. break;
  410. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  411. list_for_each_entry(a, &exp_buf->attachments, list) {
  412. c = (struct carveout_buf *)a->sgt;
  413. dma_sync_single_for_cpu(a->dev, c->addr, c->size,
  414. dir);
  415. }
  416. break;
  417. default:
  418. break;
  419. }
  420. mutex_unlock(&exp_buf->lock);
  421. return 0;
  422. }
  423. static int vhost_vdmabuf_dmabuf_end_cpu_access(struct dma_buf *dmabuf,
  424. enum dma_data_direction dir)
  425. {
  426. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  427. struct virtio_vdmabuf_attachment *a;
  428. int heap_type = exp_buf->heap_type;
  429. struct carveout_buf *c;
  430. mutex_lock(&exp_buf->lock);
  431. switch (heap_type) {
  432. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  433. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  434. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  435. list_for_each_entry(a, &exp_buf->attachments, list)
  436. dma_sync_sgtable_for_device(a->dev, a->sgt, dir);
  437. break;
  438. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  439. list_for_each_entry(a, &exp_buf->attachments, list) {
  440. c = (struct carveout_buf *)a->sgt;
  441. dma_sync_single_for_device(a->dev, c->addr, c->size,
  442. dir);
  443. }
  444. break;
  445. default:
  446. break;
  447. }
  448. mutex_unlock(&exp_buf->lock);
  449. return 0;
  450. }
  451. static int vhost_vdmabuf_dmabuf_attach(struct dma_buf *dmabuf,
  452. struct dma_buf_attachment *attachment)
  453. {
  454. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  455. struct virtio_vdmabuf_attachment *a;
  456. struct sg_table *sgt;
  457. a = kzalloc(sizeof(*a), GFP_KERNEL);
  458. if (!a)
  459. return -ENOMEM;
  460. sgt = get_sg_table(exp_buf);
  461. if (IS_ERR(sgt)) {
  462. kfree(a);
  463. return PTR_ERR(sgt);
  464. }
  465. a->sgt = sgt;
  466. a->dev = attachment->dev;
  467. INIT_LIST_HEAD(&a->list);
  468. attachment->priv = a;
  469. mutex_lock(&exp_buf->lock);
  470. list_add(&a->list, &exp_buf->attachments);
  471. mutex_unlock(&exp_buf->lock);
  472. return 0;
  473. }
  474. static void vhost_vdmabuf_dmabuf_detach(struct dma_buf *dmabuf,
  475. struct dma_buf_attachment *attachment)
  476. {
  477. struct virtio_vdmabuf_attachment *a = attachment->priv;
  478. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  479. mutex_lock(&exp_buf->lock);
  480. list_del(&a->list);
  481. mutex_unlock(&exp_buf->lock);
  482. put_sg_table(exp_buf, a->sgt);
  483. kfree(a);
  484. }
  485. static const struct dma_buf_ops vhost_vdmabuf_dmabuf_ops = {
  486. .attach = vhost_vdmabuf_dmabuf_attach,
  487. .detach = vhost_vdmabuf_dmabuf_detach,
  488. .map_dma_buf = vhost_vdmabuf_dmabuf_map,
  489. .unmap_dma_buf = vhost_vdmabuf_dmabuf_unmap,
  490. .release = vhost_vdmabuf_dmabuf_release,
  491. .mmap = vhost_vdmabuf_dmabuf_mmap,
  492. .vmap = vhost_vdmabuf_dmabuf_vmap,
  493. .vunmap = vhost_vdmabuf_dmabuf_vunmap,
  494. .begin_cpu_access = vhost_vdmabuf_dmabuf_begin_cpu_access,
  495. .end_cpu_access = vhost_vdmabuf_dmabuf_end_cpu_access,
  496. };
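/*
 * Message layout shared with the guest driver: op[0] = vmid,
 * op[1] = buf_id, op[2] = heap_type, op[3] = carveout_type,
 * op[4] = flags, op[5] = bp_num, followed by bp_num bp[] entries
 * (addr/size of each backing chunk). Messages are appended to msg_list
 * here and pushed to the guest's recv queue from vhost_send_msg_work().
 */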
  497. static int send_msg_to_guest(struct vhost_vdmabuf *vdmabuf,
  498. enum virtio_vdmabuf_cmd cmd, void *data,
  499. int bp_num)
  500. {
  501. struct virtio_vdmabuf_msg *msg;
  502. struct virtio_vdmabuf_buf *exp_buf;
  503. unsigned int buf_id;
  504. unsigned long irqflags;
  505. int ret;
  506. if (bp_num > VIRTIO_VDMABUF_MAX_BP_NUM) {
  507. dev_err(drv_info->dev, "[%s:%d] max bp num reached %d\n",
  508. __func__, __LINE__, bp_num);
  509. return -EINVAL;
  510. }
  511. msg = kzalloc(struct_size(msg, bp, bp_num), GFP_KERNEL);
  512. if (!msg)
  513. return -ENOMEM;
  514. msg->op[5] = bp_num;
  515. switch (cmd) {
  516. case VIRTIO_VDMABUF_CMD_VMID_REPLY:
  517. msg->op[0] = vdmabuf->vmid;
  518. break;
  519. /* Host plays the dmabuf importer role */
  520. case VIRTIO_VDMABUF_CMD_IMPORT_REQ:
  521. buf_id = *(unsigned int *)data;
  522. msg->op[0] = vdmabuf->vmid;
  523. msg->op[1] = buf_id;
  524. break;
  525. case VIRTIO_VDMABUF_CMD_REL_NOTIFY:
  526. buf_id = *(unsigned int *)data;
  527. spin_lock_irqsave(&vdmabuf->import_lock, irqflags);
  528. ret = vhost_vdmabuf_del_buf(vdmabuf, buf_id, false);
  529. spin_unlock_irqrestore(&vdmabuf->import_lock, irqflags);
  530. if (ret) {
  531. kfree(msg); return ret; }
  532. msg->op[0] = vdmabuf->vmid;
  533. msg->op[1] = buf_id;
  534. break;
  535. /* Host plays the dmabuf exporter role */
  536. case VIRTIO_VDMABUF_CMD_IMPORT_REPLY:
  537. exp_buf = (struct virtio_vdmabuf_buf *)data;
  538. msg->op[0] = exp_buf->vmid;
  539. msg->op[1] = exp_buf->buf_id;
  540. msg->op[2] = exp_buf->heap_type;
  541. msg->op[3] = exp_buf->carveout_type;
  542. msg->op[4] = exp_buf->flags;
  543. msg->op[5] = exp_buf->bp_num;
  544. memcpy(msg->bp, exp_buf->bp, sizeof(exp_buf->bp[0]) * exp_buf->bp_num);
  545. break;
  546. default:
  547. /* no command found */
  548. kfree(msg);
  549. return -EINVAL;
  550. }
  551. msg->cmd = cmd;
  552. list_add_tail(&msg->list, &vdmabuf->msg_list);
  553. vhost_work_queue(&vdmabuf->dev, &vdmabuf->send_work);
  554. return 0;
  555. }
  556. static void send_to_recvq(struct vhost_vdmabuf *vdmabuf,
  557. struct vhost_virtqueue *vq)
  558. {
  559. struct virtio_vdmabuf_msg *msg;
  560. int head, in, out, in_size;
  561. bool added = false;
  562. int ret, size;
  563. mutex_lock(&vq->mutex);
  564. if (!vhost_vq_get_backend(vq))
  565. goto out;
  566. vhost_disable_notify(&vdmabuf->dev, vq);
  567. for (;;) {
  568. if (list_empty(&vdmabuf->msg_list))
  569. break;
  570. head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
  571. &out, &in, NULL, NULL);
  572. if (head < 0 || head == vq->num)
  573. break;
  574. in_size = iov_length(&vq->iov[out], in);
  575. if (in_size != vdmabuf_msg_size(VIRTIO_VDMABUF_MAX_BP_NUM)) {
  576. dev_err(drv_info->dev, "tx msg with wrong size %d\n",
  577. in_size);
  578. break;
  579. }
  580. msg = list_first_entry(&vdmabuf->msg_list,
  581. struct virtio_vdmabuf_msg, list);
  582. dev_dbg(drv_info->dev, "send msg cmd %d\n", msg->cmd);
  583. list_del_init(&msg->list);
  584. size = vdmabuf_msg_size(msg->op[5]);
  585. ret = __copy_to_user(vq->iov[out].iov_base, msg, size);
  586. if (ret) {
  587. dev_err(drv_info->dev,
  588. "fail to copy tx msg\n");
  589. break;
  590. }
  591. vhost_add_used(vq, head, in_size);
  592. added = true;
  593. kfree(msg);
  594. }
  595. vhost_enable_notify(&vdmabuf->dev, vq);
  596. if (added)
  597. vhost_signal(&vdmabuf->dev, vq);
  598. out:
  599. mutex_unlock(&vq->mutex);
  600. }
  601. static void vhost_send_msg_work(struct vhost_work *work)
  602. {
  603. struct vhost_vdmabuf *vdmabuf = container_of(work,
  604. struct vhost_vdmabuf,
  605. send_work);
  606. struct vhost_virtqueue *vq = &vdmabuf->vqs[VDMABUF_VQ_RECV];
  607. send_to_recvq(vdmabuf, vq);
  608. }
  609. /* parse incoming message from a guest */
  610. static int parse_msg_from_guest(struct vhost_vdmabuf *vdmabuf,
  611. struct virtio_vdmabuf_msg *msg)
  612. {
  613. struct virtio_vdmabuf_event *event;
  614. struct virtio_vdmabuf_buf *exp_buf;
  615. unsigned long irqflags;
  616. unsigned int buf_id;
  617. unsigned vmid;
  618. int bp_num;
  619. int i, ret = 0;
  620. dev_dbg(drv_info->dev, "received msg cmd %d\n", msg->cmd);
  621. switch (msg->cmd) {
  622. case VIRTIO_VDMABUF_CMD_VMID_REQ:
  623. send_msg_to_guest(vdmabuf, VIRTIO_VDMABUF_CMD_VMID_REPLY, NULL, 0);
  624. break;
  625. /* Host plays the dmabuf exporter role */
  626. case VIRTIO_VDMABUF_CMD_IMPORT_REQ:
  627. vmid = msg->op[0];
  628. if (vdmabuf->vmid != vmid) {
  629. dev_err(drv_info->dev, "vmid does not match %d %d\n",
  630. vdmabuf->vmid, vmid);
  631. return -EINVAL;
  632. }
  633. buf_id = msg->op[1];
  634. spin_lock_irqsave(&vdmabuf->local_lock, irqflags);
  635. exp_buf = vhost_vdmabuf_find_buf(vdmabuf, buf_id, true);
  636. if (!exp_buf) {
  637. spin_unlock_irqrestore(&vdmabuf->local_lock, irqflags);
  638. dev_err(drv_info->dev, "no exp_buf found for buf id %d\n",
  639. buf_id);
  640. return -ENOENT;
  641. }
  642. spin_unlock_irqrestore(&vdmabuf->local_lock, irqflags);
  643. send_msg_to_guest(vdmabuf, VIRTIO_VDMABUF_CMD_IMPORT_REPLY,
  644. exp_buf, exp_buf->bp_num);
  645. get_dma_buf(exp_buf->dma_buf);
  646. break;
  647. case VIRTIO_VDMABUF_CMD_REL_NOTIFY:
  648. vmid = msg->op[0];
  649. if (vdmabuf->vmid != vmid)
  650. return -EINVAL;
  651. buf_id = msg->op[1];
  652. spin_lock_irqsave(&vdmabuf->local_lock, irqflags);
  653. exp_buf = vhost_vdmabuf_find_buf(vdmabuf, buf_id, true);
  654. if (!exp_buf) {
  655. spin_unlock_irqrestore(&vdmabuf->local_lock, irqflags);
  656. dev_err(drv_info->dev, "can't find buffer\n");
  657. return -ENOENT;
  658. }
  659. spin_unlock_irqrestore(&vdmabuf->local_lock, irqflags);
  660. dma_buf_put(exp_buf->dma_buf);
  661. break;
  662. /* Host plays the dmabuf importer role */
  663. case VIRTIO_VDMABUF_CMD_IMPORT_REPLY:
  664. vmid = msg->op[0];
  665. buf_id = msg->op[1];
  666. bp_num = msg->op[5];
  667. if (vdmabuf->vmid != vmid) {
  668. dev_err(drv_info->dev, "vmid do not match %d %d\n",
  669. vdmabuf->vmid, vmid);
  670. return -EINVAL;
  671. }
  672. if (bp_num > VIRTIO_VDMABUF_MAX_BP_NUM) {
  673. dev_err(drv_info->dev, "[%s:%d] max bp num reached %d\n",
  674. __func__, __LINE__, bp_num);
  675. return -EINVAL;
  676. }
  677. event = kzalloc(struct_size(event, bp, bp_num), GFP_KERNEL);
  678. if (!event)
  679. return -ENOMEM;
  680. memcpy(event->op, msg->op, sizeof(event->op));
  681. for (i = 0; i < bp_num; i++) {
  682. /*
  683. * no need to copy the page pointers, as the guest's page
  684. * info is not valid on the host side
  685. */
  686. event->bp[i].addr = msg->bp[i].addr;
  687. event->bp[i].size = msg->bp[i].size;
  688. }
  689. spin_lock_irqsave(&vdmabuf->eq_import->e_lock, irqflags);
  690. list_add_tail(&event->list, &vdmabuf->eq_import->e_list);
  691. wake_up_interruptible(&vdmabuf->eq_import->e_wait);
  692. spin_unlock_irqrestore(&vdmabuf->eq_import->e_lock, irqflags);
  693. break;
  694. default:
  695. ret = -EINVAL;
  696. break;
  697. }
  698. return ret;
  699. }
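/*
 * Kick handler for the guest's send queue (guest -> host messages).
 * A scratch msg sized for VIRTIO_VDMABUF_MAX_BP_NUM entries is reused
 * for every descriptor and handed to parse_msg_from_guest().
 */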
  700. static void vhost_vdmabuf_handle_send_kick(struct vhost_work *work)
  701. {
  702. struct vhost_virtqueue *vq = container_of(work,
  703. struct vhost_virtqueue,
  704. poll.work);
  705. struct vhost_vdmabuf *vdmabuf = container_of(vq->dev,
  706. struct vhost_vdmabuf,
  707. dev);
  708. struct virtio_vdmabuf_msg *msg;
  709. int head, in, out, in_size;
  710. bool added = false;
  711. int ret;
  712. mutex_lock(&vq->mutex);
  713. if (!vhost_vq_get_backend(vq))
  714. goto out;
  715. vhost_disable_notify(&vdmabuf->dev, vq);
  716. msg = kzalloc(struct_size(msg, bp, VIRTIO_VDMABUF_MAX_BP_NUM),
  717. GFP_KERNEL);
  718. if (!msg) {
  719. dev_err(drv_info->dev, "kzalloc failed\n");
  720. goto out;
  721. }
  722. /* Make sure we will process all pending requests */
  723. for (;;) {
  724. head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
  725. &out, &in, NULL, NULL);
  726. if (head < 0 || head == vq->num)
  727. break;
  728. in_size = iov_length(&vq->iov[in], out);
  729. /*
  730. * As the size of the msg from guest doesn't have a certain
  731. * value, it is hard to do the check, so remove it.
  732. if (in_size != vdmabuf_msg_size(VIRTIO_VDMABUF_MAX_BP_NUM)) {
  733. dev_err(drv_info->dev, "rx msg with wrong size %d\n",
  734. in_size);
  735. break;
  736. }*/
  737. if (__copy_from_user(msg, vq->iov[in].iov_base, in_size)) {
  738. dev_err(drv_info->dev,
  739. "err: can't get the msg from vq\n");
  740. break;
  741. }
  742. ret = parse_msg_from_guest(vdmabuf, msg);
  743. if (ret) {
  744. dev_err(drv_info->dev, "msg parse error %d", ret);
  745. break;
  746. }
  747. vhost_add_used(vq, head, in_size);
  748. added = true;
  749. }
  750. kfree(msg);
  751. vhost_enable_notify(&vdmabuf->dev, vq);
  752. if (added)
  753. vhost_signal(&vdmabuf->dev, vq);
  754. out:
  755. mutex_unlock(&vq->mutex);
  756. }
  757. static void vhost_vdmabuf_handle_recv_kick(struct vhost_work *work)
  758. {
  759. struct vhost_virtqueue *vq = container_of(work,
  760. struct vhost_virtqueue,
  761. poll.work);
  762. struct vhost_vdmabuf *vdmabuf = container_of(vq->dev,
  763. struct vhost_vdmabuf,
  764. dev);
  765. send_to_recvq(vdmabuf, vq);
  766. }
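/*
 * Opening /dev/vhost-vdmabuf creates one vhost_vdmabuf instance per
 * guest: the VMM's pid is used as the vmid, the two virtqueues are
 * initialized, and the instance is added to the global vdmabuf list.
 */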
  767. static int vhost_vdmabuf_open(struct inode *inode, struct file *filp)
  768. {
  769. struct vhost_vdmabuf *vdmabuf;
  770. struct vhost_virtqueue **vqs;
  771. int ret = 0;
  772. if (!drv_info) {
  773. pr_err("vhost-vdmabuf: can't open misc device, "
  774. "driver is not initialized\n");
  775. return -EINVAL;
  776. }
  777. /* each vdmabuf instance serves one guest */
  778. vdmabuf = kzalloc(sizeof(*vdmabuf), GFP_KERNEL |
  779. __GFP_RETRY_MAYFAIL);
  780. if (!vdmabuf)
  781. return -ENOMEM;
  782. vqs = kmalloc_array(ARRAY_SIZE(vdmabuf->vqs), sizeof(*vqs),
  783. GFP_KERNEL);
  784. if (!vqs) {
  785. kfree(vdmabuf);
  786. return -ENOMEM;
  787. }
  788. vdmabuf->eq_import = kcalloc(1, sizeof(*(vdmabuf->eq_import)), GFP_KERNEL);
  789. if (!vdmabuf->eq_import) {
  790. kfree(vdmabuf);
  791. kfree(vqs);
  792. return -ENOMEM;
  793. }
  794. vqs[VDMABUF_VQ_SEND] = &vdmabuf->vqs[VDMABUF_VQ_SEND];
  795. vqs[VDMABUF_VQ_RECV] = &vdmabuf->vqs[VDMABUF_VQ_RECV];
  796. vdmabuf->vqs[VDMABUF_VQ_SEND].handle_kick = vhost_vdmabuf_handle_send_kick;
  797. vdmabuf->vqs[VDMABUF_VQ_RECV].handle_kick = vhost_vdmabuf_handle_recv_kick;
  798. vhost_dev_init(&vdmabuf->dev, vqs, ARRAY_SIZE(vdmabuf->vqs),
  799. UIO_MAXIOV, 0, 0, true, NULL);
  800. INIT_LIST_HEAD(&vdmabuf->msg_list);
  801. vhost_work_init(&vdmabuf->send_work, vhost_send_msg_work);
  802. vdmabuf->vmid = task_pid_nr(current);
  803. /* init guest's local and import list */
  804. spin_lock_init(&vdmabuf->local_lock);
  805. hash_init(vdmabuf->buf_list_local);
  806. spin_lock_init(&vdmabuf->import_lock);
  807. hash_init(vdmabuf->buf_list_import);
  808. /* add vdmabuf to list as we may have multiple guests */
  809. vhost_vdmabuf_add(vdmabuf);
  810. vhost_vmid = vdmabuf->vmid;
  811. mutex_init(&vdmabuf->eq_import->e_readlock);
  812. spin_lock_init(&vdmabuf->eq_import->e_lock);
  813. /* Initialize event queue */
  814. INIT_LIST_HEAD(&vdmabuf->eq_import->e_list);
  815. init_waitqueue_head(&vdmabuf->eq_import->e_wait);
  816. filp->private_data = vdmabuf;
  817. return ret;
  818. }
  819. static void vhost_vdmabuf_flush(struct vhost_vdmabuf *vdmabuf)
  820. {
  821. int i;
  822. for (i = 0; i < ARRAY_SIZE(vdmabuf->vqs); i++)
  823. if (vdmabuf->vqs[i].handle_kick)
  824. vhost_poll_flush(&vdmabuf->vqs[i].poll);
  825. vhost_work_flush(&vdmabuf->dev, &vdmabuf->send_work);
  826. }
  827. static int vhost_vdmabuf_release(struct inode *inode, struct file *filp)
  828. {
  829. struct vhost_vdmabuf *vdmabuf = filp->private_data;
  830. struct virtio_vdmabuf_event *event, *event_tmp;
  831. if (!vhost_vdmabuf_del(vdmabuf))
  832. return -EINVAL;
  833. mutex_lock(&drv_info->g_mutex);
  834. list_for_each_entry_safe(event, event_tmp,
  835. &vdmabuf->eq_import->e_list,
  836. list) {
  837. list_del(&event->list);
  838. kfree(event);
  839. }
  840. vhost_vdmabuf_flush(vdmabuf);
  841. vhost_dev_cleanup(&vdmabuf->dev);
  842. kfree(vdmabuf->eq_import);
  843. kfree(vdmabuf->dev.vqs);
  844. kfree(vdmabuf);
  845. filp->private_data = NULL;
  846. mutex_unlock(&drv_info->g_mutex);
  847. return 0;
  848. }
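/*
 * poll() and read() on the setup device are currently placeholders;
 * both simply return 0.
 */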
  849. static unsigned int vhost_vdmabuf_event_poll(struct file *filp,
  850. struct poll_table_struct *wait)
  851. {
  852. return 0;
  853. }
  854. static ssize_t vhost_vdmabuf_event_read(struct file *filp, char __user *buf,
  855. size_t cnt, loff_t *ofst)
  856. {
  857. return 0;
  858. }
  859. static int vhost_vdmabuf_start(struct vhost_vdmabuf *vdmabuf)
  860. {
  861. struct vhost_virtqueue *vq;
  862. int i, ret;
  863. mutex_lock(&vdmabuf->dev.mutex);
  864. ret = vhost_dev_check_owner(&vdmabuf->dev);
  865. if (ret)
  866. goto err;
  867. for (i = 0; i < ARRAY_SIZE(vdmabuf->vqs); i++) {
  868. vq = &vdmabuf->vqs[i];
  869. mutex_lock(&vq->mutex);
  870. if (!vhost_vq_access_ok(vq)) {
  871. ret = -EFAULT;
  872. goto err_vq;
  873. }
  874. if (!vhost_vq_get_backend(vq)) {
  875. vhost_vq_set_backend(vq, vdmabuf);
  876. ret = vhost_vq_init_access(vq);
  877. if (ret)
  878. goto err_vq;
  879. }
  880. mutex_unlock(&vq->mutex);
  881. }
  882. mutex_unlock(&vdmabuf->dev.mutex);
  883. return 0;
  884. err_vq:
  885. vhost_vq_set_backend(vq, NULL);
  886. mutex_unlock(&vq->mutex);
  887. for (i = 0; i < ARRAY_SIZE(vdmabuf->vqs); i++) {
  888. vq = &vdmabuf->vqs[i];
  889. mutex_lock(&vq->mutex);
  890. vhost_vq_set_backend(vq, NULL);
  891. mutex_unlock(&vq->mutex);
  892. }
  893. err:
  894. mutex_unlock(&vdmabuf->dev.mutex);
  895. return ret;
  896. }
  897. static int vhost_vdmabuf_stop(struct vhost_vdmabuf *vdmabuf)
  898. {
  899. struct vhost_virtqueue *vq;
  900. int i, ret;
  901. mutex_lock(&vdmabuf->dev.mutex);
  902. ret = vhost_dev_check_owner(&vdmabuf->dev);
  903. if (ret)
  904. goto err;
  905. for (i = 0; i < ARRAY_SIZE(vdmabuf->vqs); i++) {
  906. vq = &vdmabuf->vqs[i];
  907. mutex_lock(&vq->mutex);
  908. vhost_vq_set_backend(vq, NULL);
  909. mutex_unlock(&vq->mutex);
  910. }
  911. err:
  912. mutex_unlock(&vdmabuf->dev.mutex);
  913. return ret;
  914. }
  915. static int vhost_vdmabuf_set_features(struct vhost_vdmabuf *vdmabuf,
  916. u64 features)
  917. {
  918. struct vhost_virtqueue *vq;
  919. int i;
  920. if (features & ~VHOST_VDMABUF_FEATURES)
  921. return -EOPNOTSUPP;
  922. mutex_lock(&vdmabuf->dev.mutex);
  923. if ((features & (1 << VHOST_F_LOG_ALL)) &&
  924. !vhost_log_access_ok(&vdmabuf->dev)) {
  925. mutex_unlock(&vdmabuf->dev.mutex);
  926. return -EFAULT;
  927. }
  928. for (i = 0; i < ARRAY_SIZE(vdmabuf->vqs); i++) {
  929. vq = &vdmabuf->vqs[i];
  930. mutex_lock(&vq->mutex);
  931. vq->acked_features = features;
  932. mutex_unlock(&vq->mutex);
  933. }
  934. mutex_unlock(&vdmabuf->dev.mutex);
  935. return 0;
  936. }
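/*
 * Wrapper around the generic vhost ioctls plus the vdmabuf-specific
 * start/stop switch. A VMM would typically drive it roughly like the
 * sketch below (the exact vring setup depends on the VMM; this is only
 * an illustration, not taken from this driver):
 *
 *   int fd = open("/dev/vhost-vdmabuf", O_RDWR);
 *   ioctl(fd, VHOST_SET_OWNER, 0);
 *   ioctl(fd, VHOST_SET_FEATURES, &features);
 *   ioctl(fd, VHOST_SET_VRING_NUM, &state);   // per queue
 *   ioctl(fd, VHOST_SET_VRING_ADDR, &addr);   // per queue
 *   ioctl(fd, VHOST_SET_VRING_KICK, &file);   // per queue
 *   int run = 1;
 *   ioctl(fd, VHOST_VDMABUF_SET_RUNNING, &run);
 */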
  937. /* wrapper ioctl for vhost interface control */
  938. static int vhost_core_ioctl(struct file *filp, unsigned int cmd,
  939. unsigned long param)
  940. {
  941. struct vhost_vdmabuf *vdmabuf = filp->private_data;
  942. void __user *argp = (void __user *)param;
  943. u64 features;
  944. int ret, start;
  945. switch (cmd) {
  946. case VHOST_GET_FEATURES:
  947. features = VHOST_VDMABUF_FEATURES;
  948. if (copy_to_user(argp, &features, sizeof(features)))
  949. return -EFAULT;
  950. return 0;
  951. case VHOST_SET_FEATURES:
  952. if (copy_from_user(&features, argp, sizeof(features)))
  953. return -EFAULT;
  954. return vhost_vdmabuf_set_features(vdmabuf, features);
  955. case VHOST_VDMABUF_SET_RUNNING:
  956. if (copy_from_user(&start, argp, sizeof(start)))
  957. return -EFAULT;
  958. if (start)
  959. return vhost_vdmabuf_start(vdmabuf);
  960. else
  961. return vhost_vdmabuf_stop(vdmabuf);
  962. default:
  963. mutex_lock(&vdmabuf->dev.mutex);
  964. ret = vhost_dev_ioctl(&vdmabuf->dev, cmd, argp);
  965. if (ret == -ENOIOCTLCMD) {
  966. ret = vhost_vring_ioctl(&vdmabuf->dev, cmd, argp);
  967. } else {
  968. vhost_vdmabuf_flush(vdmabuf);
  969. }
  970. mutex_unlock(&vdmabuf->dev.mutex);
  971. }
  972. return ret;
  973. }
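/*
 * Allocate backing memory and export it as a dma-buf. SYSTEM uses one
 * bp entry per page, SYSTEM_CONTIG records a single higher-order
 * allocation in bp[0], and CARVEOUT points bp[0] at the reserved region
 * selected by carveout_type; the USER heap is rejected for now.
 */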
  974. static int vhost_vdmabuf_create_dmabuf(struct vhost_vdmabuf *vdmabuf,
  975. struct virtio_vdmabuf_alloc *attr)
  976. {
  977. DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
  978. int carveout_type = attr->carveout_type;
  979. int heap_type = attr->heap_type;
  980. size_t size = attr->size;
  981. struct virtio_vdmabuf_buf *exp_buf;
  982. struct dma_buf *dmabuf;
  983. unsigned long irqflags;
  984. struct page *page = NULL;
  985. int i = 0, ret = -ENOMEM, npages, bp_num;
  986. /* For carveout, buf size is fixed; the user doesn't need to specify it */
  987. if (attr->heap_type != VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT) {
  988. npages = bp_num = DIV_ROUND_UP(size, PAGE_SIZE);
  989. if (npages <= 0)
  990. return -EINVAL;
  991. }
  992. if (attr->heap_type == VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG ||
  993. attr->heap_type == VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT)
  994. bp_num = 1;
  995. exp_buf = kzalloc(struct_size(exp_buf, bp, bp_num), GFP_KERNEL);
  996. if (!exp_buf) {
  997. ret = -ENOMEM;
  998. goto err_exp;
  999. }
  1000. mutex_init(&exp_buf->lock);
  1001. exp_info.ops = &vhost_vdmabuf_dmabuf_ops;
  1002. exp_info.flags = O_RDWR;
  1003. exp_info.priv = exp_buf;
  1004. switch (heap_type) {
  1005. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  1006. ret = -EINVAL; goto err_exp; /* USER heap not supported currently */
  1007. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  1008. exp_buf->size = exp_info.size = npages * PAGE_SIZE;
  1009. exp_buf->bp_num = npages;
  1010. for (i = 0; i < npages; i++) {
  1011. page = alloc_page(GFP_KERNEL);
  1012. if (!page) {
  1013. ret = -ENOMEM;
  1014. goto err_alloc;
  1015. }
  1016. exp_buf->bp[i].page = page;
  1017. exp_buf->bp[i].addr = page_to_phys(page);
  1018. exp_buf->bp[i].size = PAGE_SIZE;
  1019. }
  1020. break;
  1021. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  1022. exp_buf->size = exp_info.size = npages * PAGE_SIZE;
  1023. /* only need 1 bp to record Compound Page */
  1024. exp_buf->bp_num = 1;
  1025. page = alloc_pages(GFP_KERNEL, get_order(exp_buf->size));
  1026. if (!page) {
  1027. ret = -ENOMEM;
  1028. goto err_exp;
  1029. }
  1030. exp_buf->bp[0].page = page;
  1031. exp_buf->bp[0].addr = page_to_phys(page);
  1032. exp_buf->bp[0].size = exp_buf->size;
  1033. break;
  1034. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  1035. if (carveout_type >= VIRTIO_VDMABUF_CARVEOUTS_NUM ||
  1036. !carveout_bufs[carveout_type].ready)
  1037. return -EINVAL;
  1038. exp_buf->bp_num = 1;
  1039. exp_buf->bp[0].addr = carveout_bufs[carveout_type].addr;
  1040. if (size <= 0 || size > carveout_bufs[carveout_type].size)
  1041. size = carveout_bufs[carveout_type].size;
  1042. exp_buf->bp[0].size = size;
  1043. exp_buf->size = exp_info.size = size;
  1044. attr->size = exp_buf->size;
  1045. break;
  1046. default:
  1047. /* no command found */
  1048. ret = -EINVAL;
  1049. goto err_exp;
  1050. }
  1051. /* export the real dmabuf */
  1052. dmabuf = dma_buf_export(&exp_info);
  1053. if (IS_ERR_OR_NULL(dmabuf))
  1054. goto err_alloc;
  1055. ret = dma_buf_fd(dmabuf, 0);
  1056. if (ret < 0) {
  1057. dma_buf_put(dmabuf);
  1058. goto err_alloc;
  1059. }
  1060. attr->fd = ret;
  1061. INIT_LIST_HEAD(&exp_buf->attachments);
  1062. exp_buf->vmid = vdmabuf->vmid;
  1063. exp_buf->heap_type = heap_type;
  1064. exp_buf->carveout_type = carveout_type;
  1065. exp_buf->flags = attr->flags;
  1066. exp_buf->dma_buf = dmabuf;
  1067. exp_buf->buf_id = get_buf_id();
  1068. exp_buf->host = vdmabuf;
  1069. exp_buf->valid = true;
  1070. exp_buf->imported = false;
  1071. attr->buf_id = exp_buf->buf_id;
  1072. spin_lock_irqsave(&vdmabuf->local_lock, irqflags);
  1073. vhost_vdmabuf_add_buf(vdmabuf, exp_buf, true);
  1074. spin_unlock_irqrestore(&vdmabuf->local_lock, irqflags);
  1075. return 0;
  1076. err_alloc:
  1077. if (heap_type == VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG)
  1078. __free_pages(page, get_order(exp_buf->size));
  1079. while(i--)
  1080. put_page(exp_buf->bp[i].page);
  1081. err_exp:
  1082. kfree(exp_buf);
  1083. return ret;
  1084. }
  1085. static int vhost_vdmabuf_create_mirror_dmabuf(struct vhost_vdmabuf *vdmabuf,
  1086. struct virtio_vdmabuf_import *attr,
  1087. struct virtio_vdmabuf_event *event)
  1088. {
  1089. DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
  1090. struct virtio_vdmabuf_buf *exp_buf;
  1091. struct dma_buf *dmabuf;
  1092. unsigned long irqflags;
  1093. int heap_type;
  1094. int carveout_type;
  1095. unsigned int buf_id;
  1096. phys_addr_t addr;
  1097. int bp_num;
  1098. int ret = -ENOMEM;
  1099. int i, size;
  1100. buf_id = event->op[1];
  1101. if (attr->buf_id != buf_id) {
  1102. dev_err(drv_info->dev, "buf id does not match %d %d\n",
  1103. attr->buf_id, buf_id);
  1104. return -EINVAL;
  1105. }
  1106. heap_type = event->op[2];
  1107. bp_num = event->op[5];
  1108. if (bp_num <= 0 || bp_num > VIRTIO_VDMABUF_MAX_BP_NUM)
  1109. return -EINVAL;
  1110. exp_buf = kzalloc(struct_size(exp_buf, bp, bp_num), GFP_KERNEL);
  1111. if (!exp_buf)
  1112. goto err_exp;
  1113. exp_buf->bp_num = bp_num;
  1114. mutex_init(&exp_buf->lock);
  1115. switch (heap_type) {
  1116. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  1117. for (i = 0; i < exp_buf->bp_num; i++) {
  1118. /* the host can access the guest's pages, so convert the phys addr directly */
  1119. exp_buf->bp[i].page = phys_to_page(event->bp[i].addr);
  1120. exp_buf->bp[i].addr = event->bp[i].addr;
  1121. exp_buf->bp[i].size = event->bp[i].size;
  1122. exp_buf->size += event->bp[i].size;
  1123. }
  1124. break;
  1125. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  1126. if (exp_buf->bp_num != 1) {
  1127. dev_err(drv_info->dev, "[%s:%d] error %d\n",
  1128. __func__, __LINE__, exp_buf->bp_num);
  1129. exp_buf->bp_num = 1;
  1130. }
  1131. addr = event->bp[0].addr;
  1132. exp_buf->bp[0].page = phys_to_page(addr);
  1133. exp_buf->bp[0].size = event->bp[0].size;
  1134. exp_buf->size = exp_buf->bp[0].size;
  1135. break;
  1136. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  1137. ret = -EINVAL;
  1138. goto err_exp;
  1139. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  1140. if (exp_buf->bp_num != 1) {
  1141. dev_err(drv_info->dev, "[%s:%d] error %d\n",
  1142. __func__, __LINE__, exp_buf->bp_num);
  1143. exp_buf->bp_num = 1;
  1144. }
  1145. carveout_type = event->op[3];
  1146. addr = event->bp[0].addr;
  1147. size = event->bp[0].size;
  1148. if (addr != carveout_bufs[carveout_type].addr ||
  1149. size != carveout_bufs[carveout_type].size) {
  1150. dev_err(drv_info->dev, "[%s:%d] error\n",
  1151. __func__, __LINE__);
  1152. ret = -EINVAL; goto err_exp;
  1153. }
  1154. exp_buf->bp[0].addr = addr;
  1155. exp_buf->bp[0].size = size;
  1156. exp_buf->size = size;
  1157. break;
  1158. default:
  1159. ret = -EINVAL; goto err_exp;
  1160. }
  1161. exp_info.ops = &vhost_vdmabuf_dmabuf_ops;
  1162. exp_info.size = exp_buf->size;
  1163. exp_info.flags = O_RDWR;
  1164. exp_info.priv = exp_buf;
  1165. /* export the real dmabuf */
  1166. dmabuf = dma_buf_export(&exp_info);
  1167. if (IS_ERR_OR_NULL(dmabuf))
  1168. goto err_exp;
  1169. ret = dma_buf_fd(dmabuf, 0);
  1170. if (ret < 0) {
  1171. dma_buf_put(dmabuf);
  1172. goto err_exp;
  1173. }
  1174. attr->fd = ret;
  1175. attr->size = exp_buf->size;
  1176. INIT_LIST_HEAD(&exp_buf->attachments);
  1177. exp_buf->vmid = vdmabuf->vmid;
  1178. exp_buf->dma_buf = dmabuf;
  1179. exp_buf->buf_id = attr->buf_id;
  1180. exp_buf->heap_type = event->op[2];
  1181. exp_buf->carveout_type = event->op[3];
  1182. exp_buf->flags = event->op[4];
  1183. exp_buf->host = vdmabuf;
  1184. /*
  1185. * This marks a mirrored dmabuf: a release
  1186. * cmd must be sent back to the guest when
  1187. * this dmabuf is released.
  1188. */
  1189. exp_buf->imported = true;
  1190. exp_buf->valid = true;
  1191. spin_lock_irqsave(&vdmabuf->import_lock, irqflags);
  1192. vhost_vdmabuf_add_buf(vdmabuf, exp_buf, false);
  1193. spin_unlock_irqrestore(&vdmabuf->import_lock, irqflags);
  1194. return 0;
  1195. err_exp:
  1196. kfree(exp_buf);
  1197. return ret;
  1198. }
  1199. static int alloc_ioctl(struct vhost_vdmabuf *vdmabuf, void *data)
  1200. {
  1201. struct virtio_vdmabuf_alloc *attr = data;
  1202. int ret;
  1203. mutex_lock(&vdmabuf->dev.mutex);
  1204. ret = vhost_vdmabuf_create_dmabuf(vdmabuf, attr);
  1205. mutex_unlock(&vdmabuf->dev.mutex);
  1206. return ret;
  1207. }
  1208. static int import_ioctl(struct vhost_vdmabuf *vdmabuf, void *data)
  1209. {
  1210. struct virtio_vdmabuf_import *attr = data;
  1211. struct virtio_vdmabuf_buf *imp_buf;
  1212. struct virtio_vdmabuf_event *event;
  1213. unsigned long irqflags;
  1214. int ret = 0;
  1215. spin_lock_irqsave(&vdmabuf->import_lock, irqflags);
  1216. imp_buf = vhost_vdmabuf_find_buf(vdmabuf, attr->buf_id, false);
  1217. if (imp_buf && imp_buf->valid) {
  1218. ret = dma_buf_fd(imp_buf->dma_buf, 0);
  1219. if (ret < 0) {
  1220. dma_buf_put(imp_buf->dma_buf);
  1221. spin_unlock_irqrestore(&vdmabuf->import_lock, irqflags);
  1222. return ret;
  1223. }
  1224. attr->fd = ret;
  1225. attr->size = imp_buf->size;
  1226. spin_unlock_irqrestore(&vdmabuf->import_lock, irqflags);
  1227. return 0;
  1228. }
  1229. spin_unlock_irqrestore(&vdmabuf->import_lock, irqflags);
  1230. /*
  1231. * The dmabuf with this buf_id is not in the local hash list;
  1232. * send an import request to the guest to fetch its description.
  1233. */
  1234. mutex_lock(&vdmabuf->dev.mutex);
  1235. send_msg_to_guest(vdmabuf, VIRTIO_VDMABUF_CMD_IMPORT_REQ, &attr->buf_id, 0);
  1236. /* the guest's reply to the import request will wake us up */
  1237. if (wait_event_interruptible(vdmabuf->eq_import->e_wait,
  1238. !list_empty(&vdmabuf->eq_import->e_list))) {
  1239. mutex_unlock(&vdmabuf->dev.mutex);
  1240. dev_err(drv_info->dev, "OMG, this err is not expected\n");
  1241. return -ERESTARTSYS;
  1242. }
  1243. spin_lock_irqsave(&vdmabuf->eq_import->e_lock, irqflags);
  1244. event = list_first_entry(&vdmabuf->eq_import->e_list,
  1245. struct virtio_vdmabuf_event, list);
  1246. /* safely del the event from list and free it */
  1247. list_del(&event->list);
  1248. spin_unlock_irqrestore(&vdmabuf->eq_import->e_lock, irqflags);
  1249. /* create local mirror dmabuf */
  1250. ret = vhost_vdmabuf_create_mirror_dmabuf(vdmabuf, attr, event);
  1251. if (ret)
  1252. dev_err(drv_info->dev, "create mirror dmabuf failed %d\n",
  1253. ret);
  1254. kfree(event);
  1255. mutex_unlock(&vdmabuf->dev.mutex);
  1256. return ret;
  1257. }
  1258. static const struct vhost_vdmabuf_ioctl_desc vhost_vdmabuf_ioctls[] = {
  1259. VIRTIO_VDMABUF_IOCTL_DEF(VIRTIO_VDMABUF_IOCTL_ALLOC_FD, alloc_ioctl, 0),
  1260. VIRTIO_VDMABUF_IOCTL_DEF(VIRTIO_VDMABUF_IOCTL_IMPORT_FD, import_ioctl, 0),
  1261. };
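/*
 * Sketch of the intended userspace flow on /dev/vhost-user-vdmabuf.
 * The struct layouts live in the shared virtio_vdmabuf uapi header; the
 * field names below are the ones this file touches, the rest is only
 * illustrative:
 *
 *   int fd = open("/dev/vhost-user-vdmabuf", O_RDWR);
 *
 *   struct virtio_vdmabuf_alloc alloc = {
 *           .heap_type = VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM,
 *           .size = 4 * 4096,
 *   };
 *   ioctl(fd, VIRTIO_VDMABUF_IOCTL_ALLOC_FD, &alloc);
 *   // alloc.fd is a local dma-buf, alloc.buf_id identifies it to the peer
 *
 *   struct virtio_vdmabuf_import import = { .buf_id = shared_buf_id };
 *   ioctl(fd, VIRTIO_VDMABUF_IOCTL_IMPORT_FD, &import);
 *   // import.fd now refers to the (possibly mirrored) dma-buf
 */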
  1262. static long vhost_vdmabuf_ioctl(struct file *filp, unsigned int cmd,
  1263. unsigned long param)
  1264. {
  1265. int ret = -EINVAL;
  1266. /* check if cmd is vhost's */
  1267. if (_IOC_TYPE(cmd) == VHOST_VIRTIO)
  1268. ret = vhost_core_ioctl(filp, cmd, param);
  1269. return ret;
  1270. }
  1271. static const struct file_operations vhost_vdmabuf_fops = {
  1272. .owner = THIS_MODULE,
  1273. .open = vhost_vdmabuf_open,
  1274. .release = vhost_vdmabuf_release,
  1275. .read = vhost_vdmabuf_event_read,
  1276. .poll = vhost_vdmabuf_event_poll,
  1277. .unlocked_ioctl = vhost_vdmabuf_ioctl,
  1278. };
  1279. /*
  1280. * The vhost-vdmabuf dev is used by the hypervisor, e.g. kvmtool,
  1281. * to set up this vhost backend.
  1282. */
  1283. static struct miscdevice vhost_vdmabuf_miscdev = {
  1284. .minor = MISC_DYNAMIC_MINOR,
  1285. .name = "vhost-vdmabuf",
  1286. .fops = &vhost_vdmabuf_fops,
  1287. };
  1288. static int vhost_vdmabuf_user_open(struct inode *inode, struct file *filp)
  1289. {
  1290. struct vhost_vdmabuf *vdmabuf;
  1291. vdmabuf = vhost_vdmabuf_find(vhost_vmid);
  1292. if (!vdmabuf) {
  1293. dev_warn(drv_info->dev,
  1294. "vhost-vdmabuf: no vdmabuf with vmid %d found\n",
  1295. vhost_vmid);
  1296. return -EINVAL;
  1297. }
  1298. filp->private_data = vdmabuf;
  1299. return 0;
  1300. }
  1301. static int vhost_vdmabuf_user_release(struct inode *inode, struct file *filp)
  1302. {
  1303. mutex_lock(&drv_info->g_mutex);
  1304. filp->private_data = NULL;
  1305. mutex_unlock(&drv_info->g_mutex);
  1306. return 0;
  1307. }
  1308. static long vhost_vdmabuf_user_ioctl(struct file *filp, unsigned int cmd,
  1309. unsigned long param)
  1310. {
  1311. struct vhost_vdmabuf *vdmabuf = filp->private_data;
  1312. const struct vhost_vdmabuf_ioctl_desc *ioctl;
  1313. vhost_vdmabuf_ioctl_t func;
  1314. unsigned int nr;
  1315. int ret;
  1316. char *kdata;
  1317. nr = _IOC_NR(cmd);
  1318. if (nr >= ARRAY_SIZE(vhost_vdmabuf_ioctls)) {
  1319. dev_err(drv_info->dev, "invalid ioctl\n");
  1320. return -EINVAL;
  1321. }
  1322. ioctl = &vhost_vdmabuf_ioctls[nr];
  1323. func = ioctl->func;
  1324. if (unlikely(!func)) {
  1325. dev_err(drv_info->dev, "no function\n");
  1326. return -EINVAL;
  1327. }
  1328. kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
  1329. if (!kdata)
  1330. return -ENOMEM;
  1331. if (copy_from_user(kdata, (void __user *)param,
  1332. _IOC_SIZE(cmd)) != 0) {
  1333. dev_err(drv_info->dev,
  1334. "failed to copy args from userspace\n");
  1335. ret = -EFAULT;
  1336. goto ioctl_error;
  1337. }
  1338. ret = func(vdmabuf, kdata);
  1339. if (copy_to_user((void __user *)param, kdata,
  1340. _IOC_SIZE(cmd)) != 0) {
  1341. dev_err(drv_info->dev,
  1342. "failed to copy args back to userspace\n");
  1343. ret = -EFAULT;
  1344. goto ioctl_error;
  1345. }
  1346. ioctl_error:
  1347. kfree(kdata);
  1348. return ret;
  1349. }
  1350. static const struct file_operations vhost_vdmabuf_user_fops = {
  1351. .owner = THIS_MODULE,
  1352. .open = vhost_vdmabuf_user_open,
  1353. .release = vhost_vdmabuf_user_release,
  1354. .unlocked_ioctl = vhost_vdmabuf_user_ioctl,
  1355. };
  1356. /*
  1357. * The vhost-user-vdmabuf dev is used by dmabuf users to allocate
  1358. * a dmabuf or to import an existing one via its buf id.
  1359. */
  1360. static struct miscdevice vhost_vdmabuf_user_miscdev = {
  1361. .minor = MISC_DYNAMIC_MINOR,
  1362. .name = "vhost-user-vdmabuf",
  1363. .fops = &vhost_vdmabuf_user_fops,
  1364. };
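/*
 * Module init order: register the vhost setup device first, allocate
 * drv_info, then register the user-facing device; carveout parsing is
 * best-effort and only warns on failure.
 */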
  1365. static int __init vhost_vdmabuf_init(void)
  1366. {
  1367. int ret = 0;
  1368. /* register vhost setup dev */
  1369. ret = misc_register(&vhost_vdmabuf_miscdev);
  1370. if (ret) {
  1371. pr_err("vhost-vdmabuf: setup dev can't be registered, err %d\n",
  1372. ret);
  1373. return ret;
  1374. }
  1375. dma_coerce_mask_and_coherent(vhost_vdmabuf_miscdev.this_device,
  1376. DMA_BIT_MASK(64));
  1377. drv_info = kcalloc(1, sizeof(*drv_info), GFP_KERNEL);
  1378. if (!drv_info) {
  1379. misc_deregister(&vhost_vdmabuf_miscdev);
  1380. return -ENOMEM;
  1381. }
  1382. drv_info->dev = vhost_vdmabuf_miscdev.this_device;
  1383. /* register dmabuf alloc & get dev for user */
  1384. ret = misc_register(&vhost_vdmabuf_user_miscdev);
  1385. if (ret) {
  1386. dev_err(drv_info->dev, "vhost-vdmabuf: user dev can't be registered\n");
  1387. misc_deregister(&vhost_vdmabuf_miscdev);
  1388. kfree(drv_info);
  1389. return ret;
  1390. }
  1391. ret = carveout_buf_setup();
  1392. if (ret < 0)
  1393. dev_warn(drv_info->dev,
  1394. "vhost-vdmabuf: carveout bufs setup failed %d\n",
  1395. ret);
  1396. mutex_init(&drv_info->g_mutex);
  1397. INIT_LIST_HEAD(&drv_info->head_vdmabuf_list);
  1398. dev_info(drv_info->dev, "vhost-vdmabuf: init successfully\n");
  1399. return 0;
  1400. }
  1401. static void __exit vhost_vdmabuf_deinit(void)
  1402. {
  1403. misc_deregister(&vhost_vdmabuf_miscdev);
  1404. misc_deregister(&vhost_vdmabuf_user_miscdev);
  1405. vhost_vdmabuf_del_all();
  1406. kfree(drv_info);
  1407. drv_info = NULL;
  1408. }
  1409. module_init(vhost_vdmabuf_init);
  1410. module_exit(vhost_vdmabuf_deinit);
  1411. MODULE_DESCRIPTION("Vhost Vdmabuf Driver");
  1412. MODULE_AUTHOR("Xianting Tian <xianting.tian@linux.alibaba.com>");
  1413. MODULE_LICENSE("GPL and additional rights");