vboxguest_utils.c

/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];

#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...) \
{ \
	unsigned long flags; \
	va_list args; \
	int i, count; \
	\
	va_start(args, fmt); \
	spin_lock_irqsave(&vbg_log_lock, flags); \
	\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args); \
	for (i = 0; i < count; i++) \
		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
	\
	pr_func("%s", vbg_log_buf); \
	\
	spin_unlock_irqrestore(&vbg_log_lock, flags); \
	va_end(args); \
} \
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
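
/*
 * Allocate a VMMDev request of @len bytes and pre-fill its header.
 * The buffer comes from __get_free_pages() with GFP_DMA32 because its
 * physical address is handed to the host through a single 32-bit port
 * write (see vbg_req_perform()), so it must lie below 4 GiB.  The body
 * is poisoned with 0xaa so that uninitialized fields stand out.
 * Returns NULL when the allocation fails.
 */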
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
		    u32 requestor)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->requestor = requestor;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
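
/*
 * Typical request life-cycle (a minimal sketch; VMMDEVREQ_GET_HOST_VERSION,
 * struct vmmdev_host_version and VBG_KERNEL_REQUEST are assumed to come from
 * the surrounding driver headers, check them before copying this):
 *
 *	struct vmmdev_host_version *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
 *			    VBG_KERNEL_REQUEST);
 *	if (!req)
 *		return -ENOMEM;
 *	rc = vbg_req_perform(gdev, req);	(returns a VBox status code)
 *	if (rc >= 0)
 *		vbg_info("host version %u.%u\n", req->major, req->minor);
 *	vbg_req_free(req, sizeof(*req));
 */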
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}

static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}
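
/*
 * Connect to an HGCM service on the host.  On success the new HGCM client
 * id is stored in @client_id and the host's VBox status code in
 * @vbox_status; the function itself returns a negative errno only when the
 * request buffer cannot be allocated.
 */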
int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT, requestor);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
			u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT,
					requestor);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
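
/*
 * Worked example for the helper below: with 4 KiB pages, a 5000 byte buffer
 * that starts 3840 bytes into a page covers PAGE_ALIGN(5000 + 3840) =
 * 12288 bytes, i.e. 3 pages, even though 5000 bytes alone would fit in 2.
 */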
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}

static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
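
/*
 * User-space linear-address parameters are never handed to the host
 * directly: the data is staged in a kernel bounce buffer (copied in for
 * in/both directions, zeroed for out-only) and the extra request space
 * needed for its page list is accounted in @extra.
 */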
static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}

/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce
 * buffers and figures out how much extra storage is needed for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:         Pointer to source function call parameters.
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bounce-buffer array.
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * Translates linear address types to page list direction flags.
 *
 * Return: page list flags.
 * @type:  The type.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		fallthrough;
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
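
/*
 * Resulting request layout (derived from the code above): the fixed
 * vmmdev_hgcm_call header is followed by parm_count function parameters;
 * each linear-address parameter is rewritten as a PAGELIST parameter whose
 * u.page_list.offset points past the parameter array into the "extra"
 * space, where the matching vmmdev_hgcm_pagelist (direction flags,
 * first-page offset and physical page addresses) is stored.
 */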
/**
 * Initializes the call request that we're sending to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}

/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, this should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}

/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:           The VBoxGuest device extension.
 * @call:           The call to execute.
 * @timeout_ms:     Timeout in ms.
 * @interruptible:  Whether the wait may be interrupted by a signal.
 * @leak_it:        Where to return the "leak it" indicator; set when a
 *                  cancelled call never completes and its request must not
 *                  be freed.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool interruptible, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	if (interruptible) {
		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
							   hgcm_req_done(gdev, &call->header),
							   timeout);
	} else {
		timeout = wait_event_timeout(gdev->hgcm_wq,
					     hgcm_req_done(gdev, &call->header),
					     timeout);
	}

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}

/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:        HGCM call request.
 * @dst_parm:    Pointer to function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
		  u32 function, u32 timeout_ms,
		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
		  int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
	       parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
			       requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
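
/*
 * Example caller (a minimal sketch; the client id comes from
 * vbg_hgcm_connect(), and SOME_HOST_FUNCTION plus its parameter layout are
 * hypothetical, they depend entirely on the HGCM service being called):
 *
 *	struct vmmdev_hgcm_function_parameter parms[2] = {};
 *	int vbox_status, ret;
 *
 *	parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
 *	parms[0].u.value32 = 42;
 *	parms[1].type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
 *	parms[1].u.pointer.size = buf_len;
 *	parms[1].u.pointer.u.linear_addr = (uintptr_t)kernel_buf;
 *
 *	ret = vbg_hgcm_call(gdev, VBG_KERNEL_REQUEST, client_id,
 *			    SOME_HOST_FUNCTION, U32_MAX, parms,
 *			    ARRAY_SIZE(parms), &vbox_status);
 *
 * A negative ret reports a guest-side errno; otherwise vbox_status carries
 * the host's VBox status code for the call.
 */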
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
	u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/* Copy back. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED]                            = -EPERM,
	[-VERR_FILE_NOT_FOUND]                           = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
	[-VERR_INTERRUPTED]                              = -EINTR,
	[-VERR_DEV_IO_ERROR]                             = -EIO,
	[-VERR_TOO_MUCH_DATA]                            = -E2BIG,
	[-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
	[-VERR_INVALID_HANDLE]                           = -EBADF,
	[-VERR_TRY_AGAIN]                                = -EAGAIN,
	[-VERR_NO_MEMORY]                                = -ENOMEM,
	[-VERR_INVALID_POINTER]                          = -EFAULT,
	[-VERR_RESOURCE_BUSY]                            = -EBUSY,
	[-VERR_ALREADY_EXISTS]                           = -EEXIST,
	[-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
	[-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
	[-VERR_INVALID_NAME]                             = -ENOENT,
	[-VERR_IS_A_DIRECTORY]                           = -EISDIR,
	[-VERR_INVALID_PARAMETER]                        = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
	[-VERR_INVALID_FUNCTION]                         = -ENOTTY,
	[-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
	[-VERR_FILE_TOO_BIG]                             = -EFBIG,
	[-VERR_DISK_FULL]                                = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
	[-VERR_WRITE_PROTECT]                            = -EROFS,
	[-VERR_BROKEN_PIPE]                              = -EPIPE,
	[-VERR_DEADLOCK]                                 = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
	[-VERR_NOT_SUPPORTED]                            = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
	[-VERR_NO_MORE_FILES]                            = -ENODATA,
	[-VERR_NO_DATA]                                  = -ENODATA,
	[-VERR_NET_NO_NETWORK]                           = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION]                           = -EILSEQ,
	[-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN]                                 = -ENETDOWN,
	[-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
	[-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
	[-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
	[-VERR_TIMEOUT]                                  = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
	[-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
};
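
/*
 * Convert a VBox status code to a negative errno: success codes (rc >= 0)
 * map to 0, and codes without a table entry are logged via vbg_warn() and
 * mapped to -EPROTO.
 */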
int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);