virtio_vdmabuf.c

  1. // SPDX-License-Identifier: (MIT OR GPL-2.0)
  2. #include <linux/init.h>
  3. #include <linux/kernel.h>
  4. #include <linux/errno.h>
  5. #include <linux/module.h>
  6. #include <linux/device.h>
  7. #include <linux/uaccess.h>
  8. #include <linux/miscdevice.h>
  9. #include <linux/delay.h>
  10. #include <linux/random.h>
  11. #include <linux/poll.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/dma-buf.h>
  14. #include <linux/virtio.h>
  15. #include <linux/virtio_ids.h>
  16. #include <linux/virtio_config.h>
  17. #include <linux/virtio_vdmabuf.h>
  18. #include <linux/slab.h>
  19. #include <linux/of_address.h>
  20. #include <linux/of_fdt.h>
  21. #include <linux/of.h>
  22. /* one global drv object */
  23. static struct virtio_vdmabuf_info *drv_info = NULL;
  24. /*
  25. * Example carveout_buf configuration in dts:
  26. *
  27. * vdmabuf_reserved_memory {
  28. * reg = <0x0 0x82000000 0x0 0x4000
  29. * 0x0 0x82004000 0x0 0x4000
  30. * 0x0 0x82008000 0x0 0x4000>;
  31. * reg-names = "vi", "vo", "enc";
  32. * };
  33. */
  34. static struct carveout_buf carveout_bufs[VIRTIO_VDMABUF_CARVEOUTS_NUM] = { 0 };
  35. static char carveout_names[VIRTIO_VDMABUF_CARVEOUTS_NUM][VIRTIO_VDMABUF_CARVEOUT_NAME_LEN] =
  36. VIRTIO_VDMABUF_CARVEOUT_NAMES;
  37. static unsigned int get_buf_id(void)
  38. {
  39. static int buf_id = 0;
  40. buf_id = buf_id < VIRTIO_VDMABUF_MAX_ID ? buf_id + 1 : 0;
  41. return buf_id;
  42. }
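/*
 * Parse the optional "vdmabuf_reserved_memory" device-tree node and record
 * the address/size of each named carveout region in carveout_bufs[], marking
 * the regions that were found as ready.
 */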
  43. static int carveout_buf_setup(void)
  44. {
  45. struct device_node *node;
  46. struct resource res;
  47. int i, index;
  48. int ret;
  49. node = of_find_node_by_name(NULL, "vdmabuf_reserved_memory");
  50. if (!node) {
  51. ret = -EINVAL;
  52. dev_err(drv_info->dev, "failed to find vdmabuf_reserved_memory node\n");
  53. return ret;
  54. }
  55. for (i = 0; i < VIRTIO_VDMABUF_CARVEOUTS_NUM; i++) {
  56. index = of_property_match_string(node, "reg-names",
  57. carveout_names[i]);
  58. if (index < 0)
  59. goto exit;
  60. if (of_address_to_resource(node, index, &res))
  61. goto exit;
  62. carveout_bufs[i].addr = res.start;
  63. carveout_bufs[i].size = resource_size(&res);
  64. carveout_bufs[i].ready = true;
  65. }
  66. exit:
  67. of_node_put(node);
  68. return index;
  69. }
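/*
 * Build a scatter list describing exp_buf. For the carveout heap a
 * struct carveout_buf is returned instead, cast to struct sg_table; the
 * heap-type-aware callers below cast it back before use.
 */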
  70. static struct sg_table *get_sg_table(struct virtio_vdmabuf_buf *exp_buf)
  71. {
  72. int heap_type = exp_buf->heap_type;
  73. struct carveout_buf *carveout_sg;
  74. struct sg_table *sgt;
  75. struct scatterlist *sgl;
  76. int i, ret;
  77. switch (heap_type) {
  78. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  79. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  80. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  81. /* SYSTEM and SYSTEM_CONTIG share the same logic */
  82. sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
  83. if (!sgt)
  84. return ERR_PTR(-ENOMEM);
  85. ret = sg_alloc_table(sgt, exp_buf->bp_num, GFP_KERNEL);
  86. if (ret) {
  87. kfree(sgt);
  88. return ERR_PTR(-ENOMEM);
  89. }
  90. sgl = sgt->sgl;
  91. for (i = 0; i < exp_buf->bp_num; i++) {
  92. sg_set_page(sgl, exp_buf->bp[i].page,
  93. exp_buf->bp[i].size, 0);
  94. sgl = sg_next(sgl);
  95. }
  96. break;
  97. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  98. carveout_sg = kzalloc(sizeof(struct carveout_buf), GFP_KERNEL);
  99. if (!carveout_sg)
  100. return ERR_PTR(-ENOMEM);
  101. carveout_sg->addr = exp_buf->bp[0].addr;
  102. carveout_sg->size = exp_buf->bp[0].size;
  103. sgt = (struct sg_table *)carveout_sg;
  104. break;
  105. default:
  106. return NULL;
  107. }
  108. return sgt;
  109. }
  110. static void put_sg_table(struct virtio_vdmabuf_buf *buf,
  111. struct sg_table *sgt)
  112. {
  113. int heap_type = buf->heap_type;
  114. switch (heap_type) {
  115. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  116. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  117. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  118. sg_free_table(sgt);
  119. kfree(sgt);
  120. break;
  121. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  122. kfree(sgt);
  123. break;
  124. default:
  125. break;
  126. }
  127. }
  128. static int sg_table_map(struct device *dev, struct virtio_vdmabuf_buf *exp_buf,
  129. struct sg_table *sgt, enum dma_data_direction dir)
  130. {
  131. int heap_type = exp_buf->heap_type;
  132. struct carveout_buf *carveout_sg;
  133. dma_addr_t dma_handle;
  134. switch (heap_type) {
  135. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  136. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  137. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  138. /* SYSTEM and SYSTEM_CONTIG share the same logic */
  139. if (dma_map_sgtable(dev, sgt, dir, 0)) {
  140. dev_err(dev, "[%s:%d] error\n",
  141. __func__, __LINE__);
  142. sg_free_table(sgt);
  143. kfree(sgt);
  144. return -EINVAL;
  145. }
  146. break;
  147. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  148. carveout_sg = (struct carveout_buf *)sgt;
  149. dma_handle = dma_map_single(dev, (void *)carveout_sg->addr,
  150. carveout_sg->size, dir);
  151. if (dma_mapping_error(dev, dma_handle)) {
  152. dev_err(dev, "[%s:%d] error\n",
  153. __func__, __LINE__);
  154. kfree(carveout_sg);
  155. return -EINVAL;
  156. }
  157. break;
  158. default:
  159. return -EINVAL;
  160. }
  161. return 0;
  162. }
  163. static int sg_table_unmap(struct device *dev, struct virtio_vdmabuf_buf *exp_buf,
  164. struct sg_table *sgt, enum dma_data_direction dir)
  165. {
  166. int heap_type = exp_buf->heap_type;
  167. switch (heap_type) {
  168. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  169. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  170. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  171. dma_unmap_sgtable(dev, sgt, dir, 0);
  172. break;
  173. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  174. dma_unmap_single(dev, exp_buf->bp[0].addr,
  175. exp_buf->bp[0].size, dir);
  176. break;
  177. default:
  178. return -EINVAL;
  179. }
  180. return 0;
  181. }
  182. static struct sg_table *virtio_vdmabuf_dmabuf_map(struct dma_buf_attachment *attachment,
  183. enum dma_data_direction dir)
  184. {
  185. struct virtio_vdmabuf_buf *exp_buf = attachment->dmabuf->priv;
  186. struct virtio_vdmabuf_attachment *a = attachment->priv;
  187. struct sg_table *sgt = a->sgt;
  188. int ret;
  189. ret = sg_table_map(a->dev, exp_buf, sgt, dir);
  190. if (ret)
  191. return ERR_PTR(ret);
  192. return sgt;
  193. }
  194. static void virtio_vdmabuf_dmabuf_unmap(struct dma_buf_attachment *attachment,
  195. struct sg_table *sgt,
  196. enum dma_data_direction dir)
  197. {
  198. struct virtio_vdmabuf_buf *exp_buf = attachment->dmabuf->priv;
  199. sg_table_unmap(attachment->dev, exp_buf, sgt, dir);
  200. }
  201. static int virtio_vdmabuf_dmabuf_mmap(struct dma_buf *dmabuf,
  202. struct vm_area_struct *vma)
  203. {
  204. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  205. unsigned long start, pfn;
  206. int size;
  207. int i, ret = 0;
  208. if (!exp_buf)
  209. return -EINVAL;
  210. if (vma->vm_end - vma->vm_start > exp_buf->size) {
  211. dev_warn(drv_info->dev,
  212. "vm_end[%lu] - vm_start[%lu] [%lu] > mem size[%ld]\n",
  213. vma->vm_end, vma->vm_start,
  214. vma->vm_end - vma->vm_start,
  215. exp_buf->size);
  216. return -EINVAL;
  217. }
  218. if (exp_buf->flags & VIRTIO_VDAMBUF_NONCACHED)
  219. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  220. start = vma->vm_start;
  221. switch (exp_buf->heap_type) {
  222. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  223. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  224. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  225. /* SYSTEM and SYSTEM_CONTIG share the same logic */
  226. for (i = 0; i < exp_buf->bp_num; i++) {
  227. pfn = page_to_pfn(exp_buf->bp[i].page);
  228. size = exp_buf->bp[i].size;
  229. ret = remap_pfn_range(vma, start, pfn, size,
  230. vma->vm_page_prot);
  231. if (ret)
  232. return ret;
  233. start += size;
  234. }
  235. break;
  236. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  237. /* a carveout buffer is a single physically contiguous region */
  238. ret = vm_iomap_memory(vma, exp_buf->bp[0].addr, exp_buf->bp[0].size);
  239. break;
  240. default:
  241. break;
  242. }
  243. return ret;
  244. }
  245. static void *virtio_vdmabuf_dmabuf_vmap(struct dma_buf *dmabuf)
  246. {
  247. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  248. int heap_type = exp_buf->heap_type;
  249. struct page **pages;
  250. unsigned long pfn;
  251. void *addr;
  252. int i, nr_pages;
  253. switch (heap_type) {
  254. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  255. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  256. nr_pages = exp_buf->bp_num;
  257. pages = kzalloc(nr_pages * sizeof(struct page *),
  258. GFP_KERNEL);
  259. if (!pages)
  260. return ERR_PTR(-ENOMEM);
  261. for (i = 0; i < exp_buf->bp_num; i++)
  262. pages[i] = exp_buf->bp[i].page;
  263. addr = vm_map_ram(pages, exp_buf->bp_num, 0); /* or vmap */
  264. kfree(pages);
  265. return addr;
  266. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  267. nr_pages = exp_buf->size / PAGE_SIZE;
  268. pages = kzalloc(nr_pages * sizeof(struct page *),
  269. GFP_KERNEL);
  270. if (!pages)
  271. return ERR_PTR(-ENOMEM);
  272. /* convert the head page of the contiguous memory to a pfn */
  273. pfn = page_to_pfn(exp_buf->bp[0].page);
  274. for (i = 0; i < nr_pages; i++)
  275. pages[i] = pfn_to_page(pfn + i);
  276. addr = vm_map_ram(pages, nr_pages, 0); /* or vmap */
  277. kfree(pages);
  278. return addr;
  279. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  280. return ioremap(exp_buf->bp[0].addr,
  281. exp_buf->bp[0].size);
  282. default:
  283. return ERR_PTR(-EINVAL);
  284. }
  285. return NULL;
  286. }
  287. static void virtio_vdmabuf_dmabuf_vunmap(struct dma_buf *dmabuf, void *vaddr)
  288. {
  289. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  290. int heap_type = exp_buf->heap_type;
  291. switch (heap_type) {
  292. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  293. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  294. vm_unmap_ram(vaddr, exp_buf->bp_num); break;
  295. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  296. vm_unmap_ram(vaddr, exp_buf->size / PAGE_SIZE); break;
  297. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  298. iounmap(vaddr);
  299. break;
  300. default:
  301. break;
  302. }
  303. }
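/*
 * Queue a message for the host: fill msg->op[] according to the command,
 * append the message to msg_list and kick send_msg_work to push it into
 * the send virtqueue.
 */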
  304. static int send_msg_to_host(enum virtio_vdmabuf_cmd cmd, void *data,
  305. int bp_num)
  306. {
  307. struct virtio_vdmabuf *vdmabuf = drv_info->priv;
  308. struct virtio_vdmabuf_buf *exp_buf;
  309. struct virtio_vdmabuf_msg *msg;
  310. unsigned int buf_id;
  311. unsigned long irqflags;
  312. int ret;
  313. if (bp_num > VIRTIO_VDMABUF_MAX_BP_NUM) {
  314. dev_err(drv_info->dev, "[%s:%d] max bp num reached %d\n",
  315. __func__, __LINE__, bp_num);
  316. return -EINVAL;
  317. }
  318. msg = kzalloc(struct_size(msg, bp, bp_num), GFP_KERNEL);
  319. if (!msg)
  320. return -ENOMEM;
  321. msg->op[5] = bp_num;
  322. switch (cmd) {
  323. case VIRTIO_VDMABUF_CMD_VMID_REQ:
  324. /*
  325. * set vmid to a default value; it will be updated
  326. * once the host's reply is received
  327. */
  328. vdmabuf->vmid = 0;
  329. break;
  330. /* Guest plays the importer role */
  331. case VIRTIO_VDMABUF_CMD_REL_NOTIFY:
  332. buf_id = *(unsigned int *)data;
  333. spin_lock_irqsave(&drv_info->import_lock, irqflags);
  334. /* remove dmabuf with buf_id from local hash table */
  335. ret = virtio_vdmabuf_del_buf(drv_info, buf_id, false);
  336. spin_unlock_irqrestore(&drv_info->import_lock, irqflags);
  337. if (ret) {
  338. kfree(msg); return ret; }
  339. msg->op[0] = vdmabuf->vmid;
  340. msg->op[1] = buf_id;
  341. break;
  342. case VIRTIO_VDMABUF_CMD_IMPORT_REQ:
  343. buf_id = *(unsigned int *)data;
  344. msg->op[0] = vdmabuf->vmid;
  345. msg->op[1] = buf_id;
  346. break;
  347. /* Guest plays the exporter role */
  348. case VIRTIO_VDMABUF_CMD_IMPORT_REPLY:
  349. exp_buf = (struct virtio_vdmabuf_buf *)data;
  350. msg->op[0] = exp_buf->vmid;
  351. msg->op[1] = exp_buf->buf_id;
  352. msg->op[2] = exp_buf->heap_type;
  353. msg->op[3] = exp_buf->carveout_type;
  354. msg->op[4] = exp_buf->flags;
  355. msg->op[5] = exp_buf->bp_num;
  356. memcpy(msg->bp, exp_buf->bp, sizeof(exp_buf->bp[0]) * exp_buf->bp_num);
  357. break;
  358. default:
  359. /* no command found */
  360. kfree(msg);
  361. return -EINVAL;
  362. }
  363. msg->cmd = cmd;
  364. spin_lock_irqsave(&vdmabuf->msg_list_lock, irqflags);
  365. list_add_tail(&msg->list, &vdmabuf->msg_list);
  366. spin_unlock_irqrestore(&vdmabuf->msg_list_lock, irqflags);
  367. queue_work(vdmabuf->wq, &vdmabuf->send_msg_work);
  368. return 0;
  369. }
  370. #if 0
  371. static void virtio_vdmabuf_clear_buf(struct virtio_vdmabuf_buf *exp_buf)
  372. {
  373. dma_buf_unmap_attachment(exp_buf->attach, exp_buf->sgt,
  374. DMA_BIDIRECTIONAL);
  375. if (exp_buf->dma_buf) {
  376. dma_buf_detach(exp_buf->dma_buf, exp_buf->attach);
  377. /* close connection to dma-buf completely */
  378. dma_buf_put(exp_buf->dma_buf);
  379. exp_buf->dma_buf = NULL;
  380. }
  381. }
  382. #endif
  383. static int virtio_vdmabuf_remove_buf(struct virtio_vdmabuf_info *drv_info,
  384. struct virtio_vdmabuf_buf *exp_buf,
  385. bool local)
  386. {
  387. unsigned int buf_id = exp_buf->buf_id;
  388. unsigned long irqflags;
  389. spinlock_t *lock;
  390. int ret;
  391. //virtio_vdmabuf_clear_buf(exp_buf);
  392. if (local)
  393. lock = &drv_info->local_lock;
  394. else
  395. lock = &drv_info->import_lock;
  396. spin_lock_irqsave(lock, irqflags);
  397. ret = virtio_vdmabuf_del_buf(drv_info, buf_id, local);
  398. spin_unlock_irqrestore(lock, irqflags);
  399. if (ret)
  400. return ret;
  401. kfree(exp_buf);
  402. return 0;
  403. }
  404. /* parse msg from host */
  405. static int parse_msg_from_host(struct virtio_vdmabuf *vdmabuf,
  406. struct virtio_vdmabuf_msg *msg)
  407. {
  408. struct virtio_vdmabuf_event *event;
  409. struct virtio_vdmabuf_buf *exp_buf;
  410. unsigned long irqflags;
  411. unsigned int buf_id;
  412. unsigned vmid;
  413. int i, bp_num;
  414. dev_dbg(drv_info->dev, "received msg cmd %d\n", msg->cmd);
  415. switch (msg->cmd) {
  416. /* Host's reply to the guest's VMID request issued by open() */
  417. case VIRTIO_VDMABUF_CMD_VMID_REPLY:
  418. vdmabuf->vmid = msg->op[0];
  419. if (!vdmabuf->vmid)
  420. dev_err(drv_info->dev, "vmid should not be 0\n");
  421. dev_dbg(drv_info->dev, "vmid %d\n", vdmabuf->vmid);
  422. wake_up_interruptible(&vdmabuf->eq_import->e_wait);
  423. break;
  424. /* 1. Guest plays the dmabuf exporter role */
  425. /* Guest receives an import request from the host */
  426. case VIRTIO_VDMABUF_CMD_IMPORT_REQ:
  427. vmid = msg->op[0];
  428. if (vdmabuf->vmid != vmid) {
  429. dev_err(drv_info->dev, "vmid does not match %d %d\n",
  430. vdmabuf->vmid, vmid);
  431. return -EINVAL;
  432. }
  433. buf_id = msg->op[1];
  434. spin_lock_irqsave(&drv_info->local_lock, irqflags);
  435. exp_buf = virtio_vdmabuf_find_buf(drv_info, buf_id, true);
  436. if (!exp_buf) {
  437. spin_unlock_irqrestore(&drv_info->local_lock, irqflags);
  438. dev_err(drv_info->dev, "no exp_buf found for buf id %d\n",
  439. buf_id);
  440. return -ENOENT;
  441. }
  442. spin_unlock_irqrestore(&drv_info->local_lock, irqflags);
  443. send_msg_to_host(VIRTIO_VDMABUF_CMD_IMPORT_REPLY, exp_buf,
  444. exp_buf->bp_num);
  445. /*
  446. * Only increment the reference count on the dmabuf. This
  447. * dmabuf then won't be released on this side until a
  448. * REL_NOTIFY command arrives to drop the reference again.
  449. */
  450. get_dma_buf(exp_buf->dma_buf);
  451. break;
  452. /* Guest receives a dmabuf release request from the host */
  453. case VIRTIO_VDMABUF_CMD_REL_NOTIFY:
  454. vmid = msg->op[0];
  455. if (vdmabuf->vmid != vmid) {
  456. dev_err(drv_info->dev, "[%s:%d] %d %d\n",
  457. __func__, __LINE__, vdmabuf->vmid, vmid);
  458. return -EINVAL;
  459. }
  460. buf_id = msg->op[1];
  461. spin_lock_irqsave(&drv_info->local_lock, irqflags);
  462. exp_buf = virtio_vdmabuf_find_buf(drv_info, buf_id, true);
  463. if (!exp_buf) {
  464. spin_unlock_irqrestore(&drv_info->local_lock, irqflags);
  465. dev_err(drv_info->dev, "can't find buffer\n");
  466. return -ENOENT;
  467. }
  468. dma_buf_put(exp_buf->dma_buf);
  469. spin_unlock_irqrestore(&drv_info->local_lock, irqflags);
  470. break;
  471. /* 2. Guest plays the dmabuf importer role */
  472. /* Guest receives the host's ACK for the import request
  473. * issued by ioctl(..,VIRTIO_VDMABUF_IOCTL_GET_FD,..)
  474. */
  475. case VIRTIO_VDMABUF_CMD_IMPORT_REPLY:
  476. vmid = msg->op[0];
  477. bp_num = msg->op[5];
  478. if (vdmabuf->vmid != vmid) {
  479. dev_err(drv_info->dev, "[%s:%d] %d %d\n",
  480. __func__, __LINE__, vdmabuf->vmid, vmid);
  481. return -EINVAL;
  482. }
  483. if (bp_num > VIRTIO_VDMABUF_MAX_BP_NUM) {
  484. dev_err(drv_info->dev, "[%s:%d] max bp num reached %d\n",
  485. __func__, __LINE__, bp_num);
  486. return -EINVAL;
  487. }
  488. event = kzalloc(struct_size(event, bp, bp_num), GFP_KERNEL);
  489. if (!event)
  490. return -ENOMEM;
  491. memcpy(event->op, msg->op, sizeof(event->op));
  492. for (i = 0; i < bp_num; i++) {
  493. /*
  494. * no need to copy page info, as the page info from
  495. * host is invalid at guest side
  496. */
  497. event->bp[i].addr = msg->bp[i].addr;
  498. event->bp[i].size = msg->bp[i].size;
  499. }
  500. spin_lock_irqsave(&vdmabuf->eq_import->e_lock, irqflags);
  501. list_add_tail(&event->list, &vdmabuf->eq_import->e_list);
  502. wake_up_interruptible(&vdmabuf->eq_import->e_wait);
  503. spin_unlock_irqrestore(&vdmabuf->eq_import->e_lock, irqflags);
  504. break;
  505. default:
  506. dev_err(drv_info->dev, "invalid cmd\n");
  507. return -EINVAL;
  508. }
  509. return 0;
  510. }
  511. static int virtio_vdmabuf_fill_recv_msg(struct virtio_vdmabuf *vdmabuf,
  512. struct virtio_vdmabuf_msg *msg)
  513. {
  514. struct virtqueue *vq = vdmabuf->vqs[VDMABUF_VQ_RECV];
  515. struct scatterlist sg;
  516. if (!msg)
  517. return -EINVAL;
  518. sg_init_one(&sg, msg, sizeof(struct virtio_vdmabuf_msg));
  519. return virtqueue_add_inbuf(vq, &sg, 1, msg, GFP_ATOMIC);
  520. }
  521. static int virtio_vdmabuf_fill_queue(struct virtqueue *vq)
  522. {
  523. struct virtqueue *vq_ = vq;
  524. struct virtio_vdmabuf_msg *msg;
  525. struct scatterlist sg;
  526. int added = 0;
  527. int ret, size;
  528. do {
  529. msg = kzalloc(struct_size(msg, bp, VIRTIO_VDMABUF_MAX_BP_NUM),
  530. GFP_KERNEL);
  531. if (!msg)
  532. break;
  533. size = sizeof(struct virtio_vdmabuf_msg) +
  534. sizeof(struct buf_pair) * VIRTIO_VDMABUF_MAX_BP_NUM;
  535. sg_init_one(&sg, msg, size);
  536. ret = virtqueue_add_inbuf(vq_, &sg, 1, msg, GFP_KERNEL);
  537. if (ret) {
  538. kfree(msg);
  539. break;
  540. }
  541. added++;
  542. } while(vq->num_free);
  543. dev_info(drv_info->dev, "filled %d msg buffers to vq\n", added);
  544. return added;
  545. }
  546. static void virtio_vdmabuf_recv_work(struct work_struct *work)
  547. {
  548. struct virtio_vdmabuf *vdmabuf =
  549. container_of(work, struct virtio_vdmabuf, recv_work);
  550. struct virtqueue *vq = vdmabuf->vqs[VDMABUF_VQ_RECV];
  551. struct virtio_vdmabuf_msg *msg;
  552. int sz, ret;
  553. mutex_lock(&vdmabuf->recv_lock);
  554. do {
  555. virtqueue_disable_cb(vq);
  556. for (;;) {
  557. msg = virtqueue_get_buf(vq, &sz);
  558. if (!msg)
  559. break;
  560. /* valid size */
  561. if (sz == vdmabuf_msg_size(VIRTIO_VDMABUF_MAX_BP_NUM)) {
  562. ret = parse_msg_from_host(vdmabuf, msg);
  563. if (ret)
  564. dev_err(drv_info->dev, "msg parse error %d\n",
  565. ret);
  566. ret = virtio_vdmabuf_fill_recv_msg(vdmabuf, msg);
  567. if (ret < 0) {
  568. dev_warn(drv_info->dev,
  569. "failed to fill recv msg to vq\n");
  570. kfree(msg);
  571. }
  572. } else
  573. dev_err(drv_info->dev,
  574. "received malformed message\n");
  575. }
  576. } while (!virtqueue_enable_cb(vq));
  577. mutex_unlock(&vdmabuf->recv_lock);
  578. }
  579. static void virtio_vdmabuf_send_msg_work(struct work_struct *work)
  580. {
  581. struct virtio_vdmabuf *vdmabuf =
  582. container_of(work, struct virtio_vdmabuf, send_msg_work);
  583. struct virtqueue *vq = vdmabuf->vqs[VDMABUF_VQ_SEND];
  584. struct scatterlist sg;
  585. struct virtio_vdmabuf_msg *msg;
  586. unsigned long irqflags;
  587. bool added = false;
  588. int ret, size;
  589. mutex_lock(&vdmabuf->send_lock);
  590. for (;;) {
  591. spin_lock_irqsave(&vdmabuf->msg_list_lock, irqflags);
  592. if (list_empty(&vdmabuf->msg_list)) {
  593. spin_unlock_irqrestore(&vdmabuf->msg_list_lock, irqflags);
  594. break;
  595. }
  596. msg = list_first_entry(&vdmabuf->msg_list,
  597. struct virtio_vdmabuf_msg, list);
  598. if (!msg) {
  599. dev_warn(drv_info->dev, "msg is null\n");
  600. spin_unlock_irqrestore(&vdmabuf->msg_list_lock, irqflags);
  601. continue;
  602. }
  603. list_del_init(&msg->list);
  604. spin_unlock_irqrestore(&vdmabuf->msg_list_lock, irqflags);
  605. size = sizeof(struct virtio_vdmabuf_msg) +
  606. sizeof(struct buf_pair) * msg->op[5];
  607. dev_dbg(drv_info->dev, "send msg cmd %d, size %d\n", msg->cmd, size);
  608. sg_init_one(&sg, msg, size);
  609. ret = virtqueue_add_outbuf(vq, &sg, 1, msg, GFP_KERNEL);
  610. if (ret < 0) {
  611. dev_err(drv_info->dev, "failed to add msg to vq\n");
  612. kfree(msg); break;
  613. }
  614. added = true;
  615. }
  616. if (added)
  617. virtqueue_kick(vq);
  618. mutex_unlock(&vdmabuf->send_lock);
  619. }
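/*
 * Reclaim message buffers the host has consumed from the send virtqueue,
 * then give send_msg_work another chance to push any messages still
 * pending on msg_list.
 */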
  620. static void virtio_vdmabuf_send_work(struct work_struct *work)
  621. {
  622. struct virtio_vdmabuf *vdmabuf =
  623. container_of(work, struct virtio_vdmabuf, send_work);
  624. struct virtqueue *vq = vdmabuf->vqs[VDMABUF_VQ_SEND];
  625. struct virtio_vdmabuf_msg *msg;
  626. unsigned int sz;
  627. bool added = false;
  628. mutex_lock(&vdmabuf->send_lock);
  629. do {
  630. virtqueue_disable_cb(vq);
  631. for (;;) {
  632. msg = virtqueue_get_buf(vq, &sz);
  633. if (!msg)
  634. break;
  635. kfree(msg);
  636. added = true;
  637. }
  638. } while (!virtqueue_enable_cb(vq));
  639. mutex_unlock(&vdmabuf->send_lock);
  640. /* use this chance to send msg to host if we have */
  641. if (added)
  642. queue_work(vdmabuf->wq, &vdmabuf->send_msg_work);
  643. }
  644. static void virtio_vdmabuf_recv_cb(struct virtqueue *vq)
  645. {
  646. struct virtio_vdmabuf *vdmabuf = vq->vdev->priv;
  647. if (!vdmabuf)
  648. return;
  649. queue_work(vdmabuf->wq, &vdmabuf->recv_work);
  650. }
  651. static void virtio_vdmabuf_send_cb(struct virtqueue *vq)
  652. {
  653. struct virtio_vdmabuf *vdmabuf = vq->vdev->priv;
  654. if (!vdmabuf)
  655. return;
  656. queue_work(vdmabuf->wq, &vdmabuf->send_work);
  657. }
  658. static void virtio_vdmabuf_empty_queue(struct virtqueue *vq)
  659. {
  660. void *buf;
  661. int sz;
  662. while (1) {
  663. buf = virtqueue_get_buf(vq, &sz);
  664. if (buf == NULL)
  665. break;
  666. kfree(buf);
  667. }
  668. }
  669. static int virtio_vdmabuf_remove_all_bufs(struct virtio_vdmabuf *vdmabuf)
  670. {
  671. struct virtio_vdmabuf_buf *found;
  672. struct hlist_node *tmp;
  673. struct virtqueue *vq;
  674. int bkt;
  675. int ret;
  676. hash_for_each_safe(drv_info->buf_list_local, bkt,
  677. tmp, found, node) {
  678. ret = virtio_vdmabuf_remove_buf(drv_info,
  679. found, true);
  680. if (ret)
  681. return ret;
  682. }
  683. hash_for_each_safe(drv_info->buf_list_import, bkt,
  684. tmp, found, node) {
  685. ret = virtio_vdmabuf_remove_buf(drv_info,
  686. found, false);
  687. if (ret)
  688. return ret;
  689. }
  690. if (drv_info->host_ready) {
  691. vq = vdmabuf->vqs[VDMABUF_VQ_RECV];
  692. virtio_vdmabuf_empty_queue(vq);
  693. }
  694. return 0;
  695. }
  696. static void virtio_vdmabuf_release_priv(struct virtio_vdmabuf_buf *exp_buf)
  697. {
  698. int i;
  699. switch (exp_buf->heap_type) {
  700. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  701. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  702. for (i = 0; i < exp_buf->bp_num; i++)
  703. put_page(exp_buf->bp[i].page);
  704. break;
  705. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  706. __free_pages(exp_buf->bp[0].page,
  707. get_order(exp_buf->bp[0].size));
  708. break;
  709. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  710. /* no need to release */
  711. break;
  712. default:
  713. break;
  714. }
  715. }
  716. static void virtio_vdmabuf_dmabuf_release(struct dma_buf *dmabuf)
  717. {
  718. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  719. unsigned long irqflags;
  720. int buf_id;
  721. int ret;
  722. if (!exp_buf)
  723. return;
  724. exp_buf->valid = false;
  725. exp_buf->dma_buf = NULL;
  726. buf_id = exp_buf->buf_id;
  727. if (exp_buf->imported) {
  728. ret = send_msg_to_host(VIRTIO_VDMABUF_CMD_REL_NOTIFY,
  729. &buf_id, 0);
  730. if (ret < 0)
  731. dev_err(drv_info->dev,
  732. "failed(%d) to send dmabuf(%d) release cmd\n",
  733. ret, buf_id);
  734. } else {
  735. spin_lock_irqsave(&drv_info->local_lock, irqflags);
  736. ret = virtio_vdmabuf_del_buf(drv_info, buf_id, true);
  737. spin_unlock_irqrestore(&drv_info->local_lock, irqflags);
  738. if (ret)
  739. dev_err(drv_info->dev,
  740. "failed(%d) to del dmabuf(%d) from local list\n",
  741. ret, buf_id);
  742. virtio_vdmabuf_release_priv(exp_buf);
  743. }
  744. kfree(exp_buf);
  745. }
  746. static int virtio_vdmabuf_dmabuf_begin_cpu_access(struct dma_buf *dmabuf,
  747. enum dma_data_direction dir)
  748. {
  749. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  750. struct virtio_vdmabuf_attachment *a;
  751. int heap_type = exp_buf->heap_type;
  752. struct carveout_buf *c;
  753. mutex_lock(&exp_buf->lock);
  754. switch (heap_type) {
  755. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  756. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  757. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  758. list_for_each_entry(a, &exp_buf->attachments, list)
  759. dma_sync_sgtable_for_cpu(a->dev, a->sgt, dir);
  760. break;
  761. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  762. list_for_each_entry(a, &exp_buf->attachments, list) {
  763. c = (struct carveout_buf *)a->sgt;
  764. dma_sync_single_for_cpu(a->dev, c->addr, c->size,
  765. dir);
  766. }
  767. break;
  768. default:
  769. break;
  770. }
  771. mutex_unlock(&exp_buf->lock);
  772. return 0;
  773. }
  774. static int virtio_vdmabuf_dmabuf_end_cpu_access(struct dma_buf *dmabuf,
  775. enum dma_data_direction dir)
  776. {
  777. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  778. struct virtio_vdmabuf_attachment *a;
  779. int heap_type = exp_buf->heap_type;
  780. struct carveout_buf *c;
  781. mutex_lock(&exp_buf->lock);
  782. switch (heap_type) {
  783. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  784. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  785. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  786. list_for_each_entry(a, &exp_buf->attachments, list)
  787. dma_sync_sgtable_for_device(a->dev, a->sgt, dir);
  788. break;
  789. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  790. list_for_each_entry(a, &exp_buf->attachments, list) {
  791. c = (struct carveout_buf *)a->sgt;
  792. dma_sync_single_for_device(a->dev, c->addr, c->size,
  793. dir);
  794. }
  795. break;
  796. default:
  797. break;
  798. }
  799. mutex_unlock(&exp_buf->lock);
  800. return 0;
  801. }
  802. static int virtio_vdmabuf_dmabuf_attach(struct dma_buf *dmabuf,
  803. struct dma_buf_attachment *attachment)
  804. {
  805. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  806. struct virtio_vdmabuf_attachment *a;
  807. struct sg_table *sgt;
  808. a = kzalloc(sizeof(*a), GFP_KERNEL);
  809. if (!a)
  810. return -ENOMEM;
  811. sgt = get_sg_table(exp_buf);
  812. if (IS_ERR(sgt)) {
  813. kfree(a);
  814. return -ENOMEM;
  815. }
  816. a->sgt = sgt;
  817. a->dev = attachment->dev;
  818. INIT_LIST_HEAD(&a->list);
  819. attachment->priv = a;
  820. mutex_lock(&exp_buf->lock);
  821. list_add(&a->list, &exp_buf->attachments);
  822. mutex_unlock(&exp_buf->lock);
  823. return 0;
  824. }
  825. static void virtio_vdmabuf_dmabuf_detach(struct dma_buf *dmabuf,
  826. struct dma_buf_attachment *attachment)
  827. {
  828. struct virtio_vdmabuf_attachment *a = attachment->priv;
  829. struct virtio_vdmabuf_buf *exp_buf = dmabuf->priv;
  830. mutex_lock(&exp_buf->lock);
  831. list_del(&a->list);
  832. mutex_unlock(&exp_buf->lock);
  833. put_sg_table(exp_buf, a->sgt);
  834. kfree(a);
  835. }
  836. static const struct dma_buf_ops virtio_vdmabuf_dmabuf_ops = {
  837. .attach = virtio_vdmabuf_dmabuf_attach,
  838. .detach = virtio_vdmabuf_dmabuf_detach,
  839. .map_dma_buf = virtio_vdmabuf_dmabuf_map,
  840. .unmap_dma_buf = virtio_vdmabuf_dmabuf_unmap,
  841. .release = virtio_vdmabuf_dmabuf_release,
  842. .mmap = virtio_vdmabuf_dmabuf_mmap,
  843. .vmap = virtio_vdmabuf_dmabuf_vmap,
  844. .vunmap = virtio_vdmabuf_dmabuf_vunmap,
  845. .begin_cpu_access = virtio_vdmabuf_dmabuf_begin_cpu_access,
  846. .end_cpu_access = virtio_vdmabuf_dmabuf_end_cpu_access,
  847. };
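/*
 * Build a local "mirror" dmabuf for a buffer exported on the host side,
 * using the buffer description (heap type, buf pairs) carried in the
 * IMPORT_REPLY event, and return an fd for it through attr.
 */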
  848. static int virtio_vdmabuf_create_mirror_dmabuf(struct virtio_vdmabuf_import *attr,
  849. struct virtio_vdmabuf_event *event)
  850. {
  851. struct virtio_vdmabuf *vdmabuf = drv_info->priv;
  852. DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
  853. struct virtio_vdmabuf_buf *exp_buf;
  854. struct dma_buf *dmabuf;
  855. unsigned long irqflags;
  856. int heap_type;
  857. int carveout_type;
  858. unsigned int buf_id;
  859. phys_addr_t addr;
  860. int bp_num;
  861. int ret = -ENOMEM;
  862. int i, size;
  863. buf_id = event->op[1];
  864. if (attr->buf_id != buf_id)
  865. return -EINVAL;
  866. heap_type = event->op[2];
  867. bp_num = event->op[5];
  868. if (bp_num <= 0 || bp_num > VIRTIO_VDMABUF_MAX_BP_NUM)
  869. return -EINVAL;
  870. exp_buf = kzalloc(struct_size(exp_buf, bp, bp_num), GFP_KERNEL);
  871. if (!exp_buf)
  872. goto err_exp;
  873. exp_buf->bp_num = bp_num;
  874. mutex_init(&exp_buf->lock);
  875. switch (heap_type) {
  876. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  877. for (i = 0; i < bp_num; i++) {
  878. /* the guest can't actually access this page provided by the host */
  879. exp_buf->bp[i].page = phys_to_page(event->bp[i].addr);
  880. exp_buf->bp[i].addr = event->bp[i].addr;
  881. exp_buf->bp[i].size = event->bp[i].size;
  882. exp_buf->size += event->bp[i].size;
  883. }
  884. break;
  885. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  886. if (exp_buf->bp_num != 1) {
  887. dev_err(drv_info->dev, "[%s:%d] error %d\n",
  888. __func__, __LINE__, exp_buf->bp_num);
  889. exp_buf->bp_num = 1;
  890. }
  891. addr = event->bp[0].addr;
  892. exp_buf->bp[0].page = phys_to_page(addr);
  893. exp_buf->bp[0].size = event->bp[0].size;
  894. exp_buf->size = exp_buf->bp[0].size;
  895. break;
  896. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  897. ret = -EINVAL;
  898. goto err_exp;
  899. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  900. if (exp_buf->bp_num != 1) {
  901. dev_err(drv_info->dev, "[%s:%d] error %d\n",
  902. __func__, __LINE__, exp_buf->bp_num);
  903. exp_buf->bp_num = 1;
  904. }
  905. carveout_type = event->op[3];
  906. addr = event->bp[0].addr;
  907. size = event->bp[0].size;
  908. if (addr != carveout_bufs[carveout_type].addr ||
  909. size != carveout_bufs[carveout_type].size) {
  910. dev_err(drv_info->dev, "[%s:%d] error\n",
  911. __func__, __LINE__);
  912. ret = -EINVAL; goto err_exp;
  913. }
  914. exp_buf->bp[0].addr = addr;
  915. exp_buf->bp[0].size = size;
  916. exp_buf->size = size;
  917. break;
  918. default:
  919. ret = -EINVAL; goto err_exp;
  920. }
  921. exp_info.ops = &virtio_vdmabuf_dmabuf_ops;
  922. exp_info.size = exp_buf->size;
  923. exp_info.flags = O_RDWR;
  924. exp_info.priv = exp_buf;
  925. /* export the real dmabuf */
  926. dmabuf = dma_buf_export(&exp_info);
  927. if (IS_ERR_OR_NULL(dmabuf))
  928. goto err_exp;
  929. ret = dma_buf_fd(dmabuf, 0);
  930. if (ret < 0) {
  931. dma_buf_put(dmabuf);
  932. goto err_exp;
  933. }
  934. attr->fd = ret;
  935. attr->size = exp_buf->size;
  936. INIT_LIST_HEAD(&exp_buf->attachments);
  937. exp_buf->vmid = vdmabuf->vmid;
  938. exp_buf->dma_buf = dmabuf;
  939. exp_buf->buf_id = attr->buf_id;
  940. exp_buf->heap_type = event->op[2];
  941. exp_buf->carveout_type = event->op[3];
  942. exp_buf->flags = event->op[4];
  943. /*
  944. * this marks a mirrored dmabuf; a release cmd
  945. * must be sent to the host when this dmabuf
  946. * is released.
  947. */
  948. exp_buf->imported = true;
  949. exp_buf->valid = true;
  950. spin_lock_irqsave(&drv_info->import_lock, irqflags);
  951. virtio_vdmabuf_add_buf(drv_info, exp_buf, false);
  952. spin_unlock_irqrestore(&drv_info->import_lock, irqflags);
  953. return 0;
  954. err_exp:
  955. kfree(exp_buf);
  956. return ret;
  957. }
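/*
 * Allocate backing memory for the requested heap type (system pages, one
 * contiguous allocation, or a pre-reserved carveout), export it as a dmabuf
 * and hand an fd plus a new buf_id back to the caller through attr.
 */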
  958. static int virtio_vdmabuf_create_dmabuf(struct virtio_vdmabuf *vdmabuf,
  959. struct virtio_vdmabuf_alloc *attr)
  960. {
  961. DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
  962. int carveout_type = attr->carveout_type;
  963. int heap_type = attr->heap_type;
  964. size_t size = attr->size;
  965. struct virtio_vdmabuf_buf *exp_buf;
  966. struct dma_buf *dmabuf;
  967. unsigned long irqflags;
  968. struct page *page = NULL;
  969. int ret, i = 0, npages, bp_num;
  970. /* For carveout, the buf size is fixed; the user doesn't need to specify it */
  971. if (attr->heap_type != VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT) {
  972. npages = bp_num = DIV_ROUND_UP(size, PAGE_SIZE);
  973. if (npages <= 0)
  974. return -EINVAL;
  975. }
  976. if (attr->heap_type == VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG ||
  977. attr->heap_type == VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT)
  978. bp_num = 1;
  979. exp_buf = kzalloc(struct_size(exp_buf, bp, bp_num), GFP_KERNEL);
  980. if (!exp_buf) {
  981. ret = -ENOMEM;
  982. goto err_exp;
  983. }
  984. mutex_init(&exp_buf->lock);
  985. exp_info.ops = &virtio_vdmabuf_dmabuf_ops;
  986. exp_info.flags = O_RDWR;
  987. exp_info.priv = exp_buf;
  988. switch (heap_type) {
  989. case VIRTIO_VDMABUF_HEAP_TYPE_USER:
  990. ret = -EINVAL; goto err_exp; /* not supported currently */
  991. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM:
  992. exp_buf->size = exp_info.size = npages * PAGE_SIZE;
  993. exp_buf->bp_num = npages;
  994. for (i = 0; i < npages; i++) {
  995. page = alloc_page(GFP_KERNEL);
  996. if (!page) {
  997. ret = -ENOMEM;
  998. goto err_alloc;
  999. }
  1000. exp_buf->bp[i].page = page;
  1001. exp_buf->bp[i].addr = page_to_phys(page);
  1002. exp_buf->bp[i].size = PAGE_SIZE;
  1003. }
  1004. break;
  1005. case VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG:
  1006. exp_buf->size = exp_info.size = npages * PAGE_SIZE;
  1007. /* only need 1 bp to record the compound page */
  1008. exp_buf->bp_num = 1;
  1009. page = alloc_pages(GFP_KERNEL, get_order(exp_buf->size));
  1010. if (!page) {
  1011. ret = -ENOMEM;
  1012. goto err_exp;
  1013. }
  1014. exp_buf->bp[0].page = page;
  1015. exp_buf->bp[0].addr = page_to_phys(page);
  1016. exp_buf->bp[0].size = exp_buf->size;
  1017. break;
  1018. case VIRTIO_VDMABUF_HEAP_TYPE_CARVEOUT:
  1019. if (carveout_type >= VIRTIO_VDMABUF_CARVEOUTS_NUM ||
  1020. !carveout_bufs[carveout_type].ready) {
  1021. ret = -EINVAL; goto err_exp; }
  1022. exp_buf->bp_num = 1;
  1023. exp_buf->bp[0].addr = carveout_bufs[carveout_type].addr;
  1024. if (size <= 0 || size > carveout_bufs[carveout_type].size)
  1025. size = carveout_bufs[carveout_type].size;
  1026. exp_buf->bp[0].size = size;
  1027. exp_buf->size = exp_info.size = size;
  1028. attr->size = exp_buf->size;
  1029. break;
  1030. default:
  1031. /* no command found */
  1032. ret = -EINVAL;
  1033. goto err_exp;
  1034. }
  1035. /* export the real dmabuf */
  1036. dmabuf = dma_buf_export(&exp_info);
  1037. if (IS_ERR_OR_NULL(dmabuf))
  1038. goto err_alloc;
  1039. ret = dma_buf_fd(dmabuf, 0);
  1040. if (ret < 0) {
  1041. dma_buf_put(dmabuf);
  1042. goto err_alloc;
  1043. }
  1044. attr->fd = ret;
  1045. INIT_LIST_HEAD(&exp_buf->attachments);
  1046. exp_buf->vmid = vdmabuf->vmid;
  1047. exp_buf->heap_type = heap_type;
  1048. exp_buf->carveout_type = carveout_type;
  1049. exp_buf->flags = attr->flags;
  1050. exp_buf->dma_buf = dmabuf;
  1051. exp_buf->buf_id = get_buf_id();
  1052. exp_buf->valid = true;
  1053. exp_buf->imported = false;
  1054. attr->buf_id = exp_buf->buf_id;
  1055. spin_lock_irqsave(&drv_info->local_lock, irqflags);
  1056. virtio_vdmabuf_add_buf(drv_info, exp_buf, true);
  1057. spin_unlock_irqrestore(&drv_info->local_lock, irqflags);
  1058. return 0;
  1059. err_alloc:
  1060. if (heap_type == VIRTIO_VDMABUF_HEAP_TYPE_SYSTEM_CONTIG)
  1061. __free_pages(page, get_order(exp_buf->size));
  1062. while(i--)
  1063. put_page(exp_buf->bp[i].page);
  1064. err_exp:
  1065. kfree(exp_buf);
  1066. return ret;
  1067. }
  1068. static int virtio_vdmabuf_open(struct inode *inode, struct file *filp)
  1069. {
  1070. struct virtio_vdmabuf *vdmabuf;
  1071. int ret;
  1072. if (!drv_info) {
  1073. pr_err("virtio vdmabuf driver is not ready\n");
  1074. return -EINVAL;
  1075. }
  1076. vdmabuf = drv_info->priv;
  1077. mutex_lock(&drv_info->g_mutex);
  1078. if (drv_info->host_ready) {
  1079. ret = send_msg_to_host(VIRTIO_VDMABUF_CMD_VMID_REQ, NULL, 0);
  1080. if (ret < 0) {
  1081. dev_err(drv_info->dev, "failed to send vmid req\n");
  1082. mutex_unlock(&drv_info->g_mutex); return ret;
  1083. }
  1084. /* the host's reply to the vmid req will wake us up */
  1085. if (wait_event_interruptible(vdmabuf->eq_import->e_wait,
  1086. vdmabuf->vmid != 0)) {
  1087. dev_err(drv_info->dev, "interrupted while waiting for the vmid reply\n");
  1088. mutex_unlock(&drv_info->g_mutex); return -ERESTARTSYS;
  1089. }
  1090. }
  1091. mutex_unlock(&drv_info->g_mutex);
  1092. filp->private_data = vdmabuf;
  1093. return 0;
  1094. }
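/*
 * Import path: if a mirror dmabuf for attr->buf_id is already cached in the
 * import hash table, just install a new fd for it; otherwise ask the host
 * for the buffer and create the mirror dmabuf from its reply.
 */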
  1095. static int import_ioctl(struct virtio_vdmabuf *vdmabuf, void *data)
  1096. {
  1097. struct virtio_vdmabuf_import *attr = data;
  1098. struct virtio_vdmabuf_buf *imp_buf;
  1099. struct virtio_vdmabuf_event *event;
  1100. unsigned long irqflags;
  1101. int ret = 0;
  1102. if (vdmabuf->vmid <= 0)
  1103. return -EINVAL;
  1104. spin_lock_irqsave(&drv_info->import_lock, irqflags);
  1105. imp_buf = virtio_vdmabuf_find_buf(drv_info, attr->buf_id, false);
  1106. if (imp_buf && imp_buf->valid) {
  1107. ret = dma_buf_fd(imp_buf->dma_buf, 0);
  1108. if (ret < 0) {
  1109. dma_buf_put(imp_buf->dma_buf);
  1110. spin_unlock_irqrestore(&drv_info->import_lock, irqflags);
  1111. return ret;
  1112. }
  1113. attr->fd = ret;
  1114. attr->size = imp_buf->size;
  1115. spin_unlock_irqrestore(&drv_info->import_lock, irqflags);
  1116. return 0;
  1117. }
  1118. spin_unlock_irqrestore(&drv_info->import_lock, irqflags);
  1119. /*
  1120. * We can't find the dmabuf with this buf_id in the local hash list,
  1121. * so send an import request to the host to get it.
  1122. */
  1123. mutex_lock(&drv_info->g_mutex);
  1124. send_msg_to_host(VIRTIO_VDMABUF_CMD_IMPORT_REQ, &attr->buf_id, 0);
  1125. /* the host's ack to the import request will wake us up */
  1126. if (wait_event_interruptible(vdmabuf->eq_import->e_wait,
  1127. !list_empty(&vdmabuf->eq_import->e_list))) {
  1128. mutex_unlock(&drv_info->g_mutex);
  1129. dev_err(drv_info->dev, "interrupted while waiting for the import reply\n");
  1130. return -ERESTARTSYS;
  1131. }
  1132. spin_lock_irqsave(&vdmabuf->eq_import->e_lock, irqflags);
  1133. event = list_first_entry(&vdmabuf->eq_import->e_list,
  1134. struct virtio_vdmabuf_event, list);
  1135. /* safely del the event from list and free it */
  1136. list_del(&event->list);
  1137. spin_unlock_irqrestore(&vdmabuf->eq_import->e_lock, irqflags);
  1138. /* create local mirror dmabuf */
  1139. ret = virtio_vdmabuf_create_mirror_dmabuf(attr, event);
  1140. if (ret)
  1141. dev_err(drv_info->dev, "create mirror dmabuf failed %d\n",
  1142. ret);
  1143. kfree(event);
  1144. mutex_unlock(&drv_info->g_mutex);
  1145. return ret;
  1146. }
  1147. static int alloc_ioctl(struct virtio_vdmabuf *vdmabuf, void *data)
  1148. {
  1149. struct virtio_vdmabuf_alloc *attr = data;
  1150. int ret;
  1151. mutex_lock(&drv_info->g_mutex);
  1152. ret = virtio_vdmabuf_create_dmabuf(vdmabuf, attr);
  1153. mutex_unlock(&drv_info->g_mutex);
  1154. return ret;
  1155. }
  1156. static const struct virtio_vdmabuf_ioctl_desc virtio_vdmabuf_ioctls[] = {
  1157. VIRTIO_VDMABUF_IOCTL_DEF(VIRTIO_VDMABUF_IOCTL_ALLOC_FD, alloc_ioctl, 0),
  1158. VIRTIO_VDMABUF_IOCTL_DEF(VIRTIO_VDMABUF_IOCTL_IMPORT_FD, import_ioctl, 0),
  1159. };
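/*
 * Rough userspace usage sketch (variable names here are illustrative; the
 * exact struct layouts live in the linux/virtio_vdmabuf.h uapi header):
 *
 *   fd = open("/dev/virtio-vdmabuf", O_RDWR);
 *   // exporter: allocate a buffer and publish its buf_id out of band
 *   ioctl(fd, VIRTIO_VDMABUF_IOCTL_ALLOC_FD, &alloc);   // heap_type/size in; fd, buf_id out
 *   // importer: turn a buf_id received out of band into a local dmabuf fd
 *   ioctl(fd, VIRTIO_VDMABUF_IOCTL_IMPORT_FD, &import); // buf_id in; fd, size out
 */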
  1160. static long virtio_vdmabuf_ioctl(struct file *filp, unsigned int cmd,
  1161. unsigned long param)
  1162. {
  1163. struct virtio_vdmabuf *vdmabuf = filp->private_data;
  1164. const struct virtio_vdmabuf_ioctl_desc *ioctl = NULL;
  1165. unsigned int nr = _IOC_NR(cmd);
  1166. int ret;
  1167. virtio_vdmabuf_ioctl_t func;
  1168. char *kdata;
  1169. if (nr >= ARRAY_SIZE(virtio_vdmabuf_ioctls)) {
  1170. dev_err(drv_info->dev, "invalid ioctl\n");
  1171. return -EINVAL;
  1172. }
  1173. ioctl = &virtio_vdmabuf_ioctls[nr];
  1174. func = ioctl->func;
  1175. if (unlikely(!func)) {
  1176. dev_err(drv_info->dev, "no function\n");
  1177. return -EINVAL;
  1178. }
  1179. kdata = kvmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
  1180. if (!kdata)
  1181. return -ENOMEM;
  1182. if (copy_from_user(kdata, (void __user *)param,
  1183. _IOC_SIZE(cmd)) != 0) {
  1184. dev_err(drv_info->dev,
  1185. "failed to copy from user arguments\n");
  1186. ret = -EFAULT;
  1187. goto ioctl_error;
  1188. }
  1189. ret = func(vdmabuf, kdata);
  1190. if (copy_to_user((void __user *)param, kdata,
  1191. _IOC_SIZE(cmd)) != 0) {
  1192. dev_err(drv_info->dev,
  1193. "failed to copy to user arguments\n");
  1194. ret = -EFAULT;
  1195. goto ioctl_error;
  1196. }
  1197. ioctl_error:
  1198. kvfree(kdata);
  1199. return ret;
  1200. }
  1201. static unsigned int virtio_vdmabuf_event_poll(struct file *filp,
  1202. struct poll_table_struct *wait)
  1203. {
  1204. return 0;
  1205. }
  1206. static ssize_t virtio_vdmabuf_event_read(struct file *filp, char __user *buf,
  1207. size_t cnt, loff_t *ofst)
  1208. {
  1209. return 0;
  1210. }
  1211. static int virtio_vdmabuf_release(struct inode *inode, struct file *filp)
  1212. {
  1213. return 0;
  1214. }
  1215. static const struct file_operations virtio_vdmabuf_fops = {
  1216. .owner = THIS_MODULE,
  1217. .open = virtio_vdmabuf_open,
  1218. .release = virtio_vdmabuf_release,
  1219. .read = virtio_vdmabuf_event_read,
  1220. .poll = virtio_vdmabuf_event_poll,
  1221. .unlocked_ioctl = virtio_vdmabuf_ioctl,
  1222. };
  1223. static struct miscdevice virtio_vdmabuf_miscdev = {
  1224. .minor = MISC_DYNAMIC_MINOR,
  1225. .name = "virtio-vdmabuf",
  1226. .fops = &virtio_vdmabuf_fops,
  1227. };
  1228. static int virtio_vdmabuf_probe(struct virtio_device *vdev)
  1229. {
  1230. vq_callback_t *cbs[] = {
  1231. virtio_vdmabuf_recv_cb,
  1232. virtio_vdmabuf_send_cb,
  1233. };
  1234. static const char *const names[] = {
  1235. "recv",
  1236. "send",
  1237. };
  1238. struct virtio_vdmabuf *vdmabuf;
  1239. struct virtqueue *vq;
  1240. int ret = 0;
  1241. if (!drv_info)
  1242. return -EINVAL;
  1243. vdmabuf = drv_info->priv;
  1244. if (!vdmabuf)
  1245. return -EINVAL;
  1246. vdmabuf->vdev = vdev;
  1247. vdev->priv = vdmabuf;
  1248. /* initialize spinlock for synchronizing virtqueue accesses */
  1249. spin_lock_init(&vdmabuf->vq_lock);
  1250. /* initialize spinlock for synchronizing msg_list accesses */
  1251. spin_lock_init(&vdmabuf->msg_list_lock);
  1252. ret = virtio_find_vqs(vdmabuf->vdev, VDMABUF_VQ_MAX, vdmabuf->vqs,
  1253. cbs, names, NULL);
  1254. if (ret) {
  1255. dev_err(drv_info->dev, "Cannot find any vqs\n");
  1256. return ret;
  1257. }
  1258. vq = vdmabuf->vqs[VDMABUF_VQ_RECV];
  1259. ret = virtio_vdmabuf_fill_queue(vq);
  1260. if (!ret) {
  1261. ret = -ENOMEM; goto vqs_del; }
  1262. INIT_LIST_HEAD(&vdmabuf->msg_list);
  1263. INIT_WORK(&vdmabuf->recv_work, virtio_vdmabuf_recv_work);
  1264. INIT_WORK(&vdmabuf->send_work, virtio_vdmabuf_send_work);
  1265. INIT_WORK(&vdmabuf->send_msg_work, virtio_vdmabuf_send_msg_work);
  1266. mutex_lock(&drv_info->g_mutex);
  1267. drv_info->host_ready = true;
  1268. mutex_unlock(&drv_info->g_mutex);
  1269. dev_info(drv_info->dev, "virtio_vdmabuf: initialized successfully\n");
  1270. return 0;
  1271. vqs_del:
  1272. vdev->config->del_vqs(vdev);
  1273. return ret;
  1274. }
  1275. static void virtio_vdmabuf_remove(struct virtio_device *vdev)
  1276. {
  1277. struct virtio_vdmabuf *vdmabuf;
  1278. if (!drv_info)
  1279. return;
  1280. mutex_lock(&drv_info->g_mutex);
  1281. drv_info->host_ready = false;
  1282. mutex_unlock(&drv_info->g_mutex);
  1283. vdmabuf = drv_info->priv;
  1284. flush_work(&vdmabuf->recv_work);
  1285. flush_work(&vdmabuf->send_work);
  1286. flush_work(&vdmabuf->send_msg_work);
  1287. vdev->config->reset(vdev);
  1288. vdev->config->del_vqs(vdev);
  1289. }
  1290. static struct virtio_device_id id_table[] = {
  1291. { VIRTIO_ID_VDMABUF, VIRTIO_DEV_ANY_ID },
  1292. { 0 },
  1293. };
  1294. static struct virtio_driver virtio_vdmabuf_vdev_drv = {
  1295. .driver.name = KBUILD_MODNAME,
  1296. .driver.owner = THIS_MODULE,
  1297. .id_table = id_table,
  1298. .probe = virtio_vdmabuf_probe,
  1299. .remove = virtio_vdmabuf_remove,
  1300. };
  1301. static int __init virtio_vdmabuf_init(void)
  1302. {
  1303. struct virtio_vdmabuf *vdmabuf;
  1304. int ret = 0;
  1305. drv_info = kvcalloc(1, sizeof(*drv_info), GFP_KERNEL);
  1306. if (!drv_info)
  1307. return -ENOMEM;
  1308. vdmabuf = kvcalloc(1, sizeof(*vdmabuf), GFP_KERNEL);
  1309. if (!vdmabuf) {
  1310. ret = -ENOMEM;
  1311. goto free_2;
  1312. }
  1313. vdmabuf->eq_import = kvcalloc(1, sizeof(*vdmabuf->eq_import),
  1314. GFP_KERNEL);
  1315. if (!vdmabuf->eq_import) {
  1316. ret = -ENOMEM; goto free_1; }
  1317. drv_info->priv = (void *)vdmabuf;
  1318. drv_info->host_ready = false;
  1319. ret = carveout_buf_setup();
  1320. if (ret < 0)
  1321. pr_warn("virtio-vdmabuf: carveout bufs setup failed %d\n",
  1322. ret);
  1323. mutex_init(&drv_info->g_mutex);
  1324. ret = misc_register(&virtio_vdmabuf_miscdev);
  1325. if (ret) {
  1326. dev_err(drv_info->dev,
  1327. "virtio-vdmabuf misc driver can't be registered\n");
  1328. goto free_1;
  1329. }
  1330. dma_coerce_mask_and_coherent(virtio_vdmabuf_miscdev.this_device,
  1331. DMA_BIT_MASK(64));
  1332. drv_info->dev = virtio_vdmabuf_miscdev.this_device;
  1333. spin_lock_init(&vdmabuf->eq_import->e_lock);
  1334. INIT_LIST_HEAD(&vdmabuf->eq_import->e_list);
  1335. init_waitqueue_head(&vdmabuf->eq_import->e_wait);
  1336. spin_lock_init(&drv_info->local_lock);
  1337. hash_init(drv_info->buf_list_local);
  1338. spin_lock_init(&drv_info->import_lock);
  1339. hash_init(drv_info->buf_list_import);
  1340. vdmabuf->wq = create_workqueue("virtio_vdmabuf_wq");
  1341. ret = register_virtio_driver(&virtio_vdmabuf_vdev_drv);
  1342. if (ret) {
  1343. dev_err(drv_info->dev,
  1344. "vdmabuf driver can't be registered\n");
  1345. goto misc_dereg;
  1346. }
  1347. return 0;
  1348. misc_dereg:
  1349. misc_deregister(&virtio_vdmabuf_miscdev);
  1350. kvfree(vdmabuf->eq_import);
  1351. free_1:
  1352. kvfree(vdmabuf);
  1353. free_2:
  1354. kvfree(drv_info);
  1355. return ret;
  1356. }
  1357. static void __exit virtio_vdmabuf_deinit(void)
  1358. {
  1359. struct virtio_vdmabuf *vdmabuf = drv_info->priv;
  1360. struct virtio_vdmabuf_event *event, *event_tmp;
  1361. unsigned long irqflags;
  1362. misc_deregister(&virtio_vdmabuf_miscdev);
  1363. unregister_virtio_driver(&virtio_vdmabuf_vdev_drv);
  1364. if (vdmabuf->wq)
  1365. destroy_workqueue(vdmabuf->wq);
  1366. spin_lock_irqsave(&vdmabuf->eq_import->e_lock, irqflags);
  1367. list_for_each_entry_safe(event, event_tmp,
  1368. &vdmabuf->eq_import->e_list,
  1369. list) {
  1370. list_del(&event->list);
  1371. kfree(event);
  1372. }
  1373. spin_unlock_irqrestore(&vdmabuf->eq_import->e_lock, irqflags);
  1374. /* freeing all exported buffers */
  1375. virtio_vdmabuf_remove_all_bufs(vdmabuf);
  1376. kvfree(vdmabuf->eq_import);
  1377. kvfree(vdmabuf);
  1378. kvfree(drv_info);
  1379. }
  1380. module_init(virtio_vdmabuf_init);
  1381. module_exit(virtio_vdmabuf_deinit);
  1382. MODULE_DESCRIPTION("Virtio Vdmabuf frontend driver");
  1383. MODULE_AUTHOR("Xianting Tian <xianting.tian@linux.alibaba.com>");
  1384. MODULE_LICENSE("GPL and additional rights");