hv_utils_transport.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel/userspace transport abstraction for Hyper-V util driver.
 *
 * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>

#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"

static DEFINE_SPINLOCK(hvt_list_lock);
static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);

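/*
 * Drop any outgoing message that is still queued for userspace and notify
 * the util service through its on_reset callback (if one was registered).
 * Called when the daemon connection is (re)set: on chardev release and when
 * switching away from netlink mode.
 */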
static void hvt_reset(struct hvutil_transport *hvt)
{
	kfree(hvt->outmsg);
	hvt->outmsg = NULL;
	hvt->outmsg_len = 0;
	if (hvt->on_reset)
		hvt->on_reset();
}

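/*
 * read() handler for the char device: sleep until an outgoing message is
 * available (or the transport leaves CHARDEV mode), copy it to userspace,
 * then free the buffer and run the on_read callback that was attached by
 * hvutil_transport_send().
 */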
static ssize_t hvt_op_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct hvutil_transport *hvt;
	int ret;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 ||
				     hvt->mode != HVUTIL_TRANSPORT_CHARDEV))
		return -EINTR;

	mutex_lock(&hvt->lock);

	if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
		ret = -EBADF;
		goto out_unlock;
	}

	if (!hvt->outmsg) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (count < hvt->outmsg_len) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
		ret = hvt->outmsg_len;
	else
		ret = -EFAULT;

	kfree(hvt->outmsg);
	hvt->outmsg = NULL;
	hvt->outmsg_len = 0;

	if (hvt->on_read)
		hvt->on_read();
	hvt->on_read = NULL;

out_unlock:
	mutex_unlock(&hvt->lock);
	return ret;
}

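/*
 * write() handler for the char device: copy the daemon's message into a
 * kernel buffer and hand it to the util service's on_msg callback. Returns
 * the full count on success.
 */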
static ssize_t hvt_op_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct hvutil_transport *hvt;
	u8 *inmsg;
	int ret;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	inmsg = memdup_user(buf, count);
	if (IS_ERR(inmsg))
		return PTR_ERR(inmsg);

	if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
		ret = -EBADF;
	else
		ret = hvt->on_msg(inmsg, count);

	kfree(inmsg);

	return ret ? ret : count;
}

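/*
 * poll() handler: readable when an outgoing message is pending, error/hangup
 * once the transport is being destroyed.
 */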
static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
	struct hvutil_transport *hvt;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	poll_wait(file, &hvt->outmsg_q, wait);

	if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
		return EPOLLERR | EPOLLHUP;

	if (hvt->outmsg_len > 0)
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

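/*
 * open() handler: only one daemon may own the char device at a time. Opening
 * it moves the transport to CHARDEV mode; if the service was previously
 * driven over netlink, pending state is reset first.
 */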
static int hvt_op_open(struct inode *inode, struct file *file)
{
	struct hvutil_transport *hvt;
	int ret = 0;
	bool issue_reset = false;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	mutex_lock(&hvt->lock);

	if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
		ret = -EBADF;
	} else if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
		/*
		 * Switching to CHARDEV mode. We switch back to INIT when
		 * device gets released.
		 */
		hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
	} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
		/*
		 * We're switching from netlink communication to using char
		 * device. Issue the reset first.
		 */
		issue_reset = true;
		hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
	} else {
		ret = -EBUSY;
	}

	if (issue_reset)
		hvt_reset(hvt);

	mutex_unlock(&hvt->lock);

	return ret;
}

static void hvt_transport_free(struct hvutil_transport *hvt)
{
	misc_deregister(&hvt->mdev);
	kfree(hvt->outmsg);
	kfree(hvt);
}

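/*
 * release() handler: return the transport to INIT mode (unless it is being
 * destroyed), drop any queued message and, if hvutil_transport_destroy() is
 * waiting on us, signal that the device has been released.
 */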
static int hvt_op_release(struct inode *inode, struct file *file)
{
	struct hvutil_transport *hvt;
	int mode_old;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	mutex_lock(&hvt->lock);
	mode_old = hvt->mode;
	if (hvt->mode != HVUTIL_TRANSPORT_DESTROY)
		hvt->mode = HVUTIL_TRANSPORT_INIT;

	/*
	 * Cleanup message buffers to avoid spurious messages when the daemon
	 * connects back.
	 */
	hvt_reset(hvt);

	if (mode_old == HVUTIL_TRANSPORT_DESTROY)
		complete(&hvt->release);

	mutex_unlock(&hvt->lock);

	return 0;
}

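/*
 * Netlink (connector) receive path: look up the transport matching the
 * message's cn_id, switch it to NETLINK mode if it is still uninitialized,
 * and feed the payload to the service's on_msg callback.
 */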
static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	struct hvutil_transport *hvt, *hvt_found = NULL;

	spin_lock(&hvt_list_lock);
	list_for_each_entry(hvt, &hvt_list, list) {
		if (hvt->cn_id.idx == msg->id.idx &&
		    hvt->cn_id.val == msg->id.val) {
			hvt_found = hvt;
			break;
		}
	}
	spin_unlock(&hvt_list_lock);
	if (!hvt_found) {
		pr_warn("hvt_cn_callback: spurious message received!\n");
		return;
	}

	/*
	 * Switching to NETLINK mode. Switching to CHARDEV happens when someone
	 * opens the device.
	 */
	mutex_lock(&hvt->lock);
	if (hvt->mode == HVUTIL_TRANSPORT_INIT)
		hvt->mode = HVUTIL_TRANSPORT_NETLINK;

	if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
		hvt_found->on_msg(msg->data, msg->len);
	else
		pr_warn("hvt_cn_callback: unexpected netlink message!\n");

	mutex_unlock(&hvt->lock);
}

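/*
 * hvutil_transport_send - send a message from the util service to userspace.
 *
 * In NETLINK mode the message is wrapped in a cn_msg and pushed out
 * immediately; on_read_cb (if any) is invoked right away since delivery is
 * not acknowledged. In CHARDEV mode the message is queued as hvt->outmsg and
 * readers are woken up; on_read_cb then runs from hvt_op_read() once the
 * daemon has actually read the message. Only one CHARDEV message may be in
 * flight at a time: -EFAULT is returned if the previous one was not read yet.
 */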
int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
			  void (*on_read_cb)(void))
{
	struct cn_msg *cn_msg;
	int ret = 0;

	if (hvt->mode == HVUTIL_TRANSPORT_INIT ||
	    hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
		return -EINVAL;
	} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
		cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
		if (!cn_msg)
			return -ENOMEM;
		cn_msg->id.idx = hvt->cn_id.idx;
		cn_msg->id.val = hvt->cn_id.val;
		cn_msg->len = len;
		memcpy(cn_msg->data, msg, len);
		ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
		kfree(cn_msg);
		/*
		 * We don't know when netlink messages are delivered but unlike
		 * in CHARDEV mode we're not blocked and we can send next
		 * messages right away.
		 */
		if (on_read_cb)
			on_read_cb();
		return ret;
	}

	/* HVUTIL_TRANSPORT_CHARDEV */
	mutex_lock(&hvt->lock);
	if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (hvt->outmsg) {
		/* Previous message wasn't received */
		ret = -EFAULT;
		goto out_unlock;
	}

	hvt->outmsg = kzalloc(len, GFP_KERNEL);
	if (hvt->outmsg) {
		memcpy(hvt->outmsg, msg, len);
		hvt->outmsg_len = len;
		hvt->on_read = on_read_cb;
		wake_up_interruptible(&hvt->outmsg_q);
	} else
		ret = -ENOMEM;

out_unlock:
	mutex_unlock(&hvt->lock);
	return ret;
}

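/*
 * hvutil_transport_init - allocate a transport for a util service.
 *
 * Registers a misc char device named @name and, when @cn_idx/@cn_val are
 * non-zero, a netlink connector callback, so the userspace daemon can talk
 * to the service over either interface. @on_msg is called for every message
 * received from the daemon and @on_reset whenever the daemon connection is
 * reset.
 *
 * Rough usage sketch (hypothetical service, not taken from an in-tree
 * caller): call hvutil_transport_init() once on service init, push messages
 * with hvutil_transport_send(), and tear everything down with
 * hvutil_transport_destroy().
 */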
struct hvutil_transport *hvutil_transport_init(const char *name,
					       u32 cn_idx, u32 cn_val,
					       int (*on_msg)(void *, int),
					       void (*on_reset)(void))
{
	struct hvutil_transport *hvt;

	hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
	if (!hvt)
		return NULL;

	hvt->cn_id.idx = cn_idx;
	hvt->cn_id.val = cn_val;

	hvt->mdev.minor = MISC_DYNAMIC_MINOR;
	hvt->mdev.name = name;

	hvt->fops.owner = THIS_MODULE;
	hvt->fops.read = hvt_op_read;
	hvt->fops.write = hvt_op_write;
	hvt->fops.poll = hvt_op_poll;
	hvt->fops.open = hvt_op_open;
	hvt->fops.release = hvt_op_release;

	hvt->mdev.fops = &hvt->fops;

	init_waitqueue_head(&hvt->outmsg_q);
	mutex_init(&hvt->lock);
	init_completion(&hvt->release);

	spin_lock(&hvt_list_lock);
	list_add(&hvt->list, &hvt_list);
	spin_unlock(&hvt_list_lock);

	hvt->on_msg = on_msg;
	hvt->on_reset = on_reset;

	if (misc_register(&hvt->mdev))
		goto err_free_hvt;

	/* Use cn_id.idx/cn_id.val to determine if we need to setup netlink */
	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
	    cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
		goto err_free_hvt;

	return hvt;

err_free_hvt:
	spin_lock(&hvt_list_lock);
	list_del(&hvt->list);
	spin_unlock(&hvt_list_lock);
	kfree(hvt);
	return NULL;
}

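/*
 * hvutil_transport_destroy - tear a transport down.
 *
 * Marks the transport as being destroyed and wakes up any reader, removes it
 * from the global list and drops the netlink callback. If a daemon still has
 * the char device open, freeing is deferred until its release() completes.
 */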
void hvutil_transport_destroy(struct hvutil_transport *hvt)
{
	int mode_old;

	mutex_lock(&hvt->lock);
	mode_old = hvt->mode;
	hvt->mode = HVUTIL_TRANSPORT_DESTROY;
	wake_up_interruptible(&hvt->outmsg_q);
	mutex_unlock(&hvt->lock);

	/*
	 * In case we were in 'chardev' mode we still have an open fd so we
	 * have to defer freeing the device. Netlink interface can be freed
	 * now.
	 */
	spin_lock(&hvt_list_lock);
	list_del(&hvt->list);
	spin_unlock(&hvt_list_lock);
	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
		cn_del_callback(&hvt->cn_id);

	if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
		wait_for_completion(&hvt->release);

	hvt_transport_free(hvt);
}