// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *	virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *	<socket>	:= vhost-user socket path to connect
 *	<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *	<platform_id>	:= (optional) platform device id
 *
 * example:
 *	virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
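/*
 * For instance (hypothetical socket paths), a UML instance started with
 *
 *	linux ... virtio_uml.device=/tmp/net.sock:1 \
 *	          virtio_uml.device=/tmp/console.sock:3:42
 *
 * creates a virtio-net device (id 1 in virtio_ids.h) backed by the
 * vhost-user slave listening on /tmp/net.sock, and a virtio-console
 * device (id 3) with platform device id 42 backed by /tmp/console.sock.
 */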
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

/* Workaround due to a conflict between irq_user.h and irqreturn.h */
#ifdef IRQ_NONE
#undef IRQ_NONE
#endif

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)
struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	spinlock_t sock_lock;
	int sock, req_fd;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	struct virtqueue *vq;
	vq_callback_t *callback;
	struct time_travel_event defer;
#endif
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
/* Vhost-user protocol */

static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR ||
			 (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdev->dev.platform_data;

		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	}
	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc)
		return rc;

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}
static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
	    VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}
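/*
 * Sketch of the REPLY_ACK exchange implemented above (per the vhost-user
 * protocol specification; the concrete byte layout is defined by the
 * structures in vhost_user.h):
 *
 *	driver -> slave:  header { request, VERSION | NEED_REPLY, size },
 *	                  payload
 *	slave  -> driver: header { request, VERSION | REPLY, 8 },
 *	                  u64 status
 *
 * A non-zero status is mapped to -EIO in vhost_user_send().
 */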
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
			VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}
static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);

	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));
	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		virtio_config_changed(&vu_dev->vdev);
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vring_interrupt(0 /* ignored */, vq);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq(VIRTIO_IRQ, vu_dev->req_fd, IRQ_READ,
			    vu_req_interrupt, IRQF_SHARED,
			    vu_dev->pdev->name, vu_dev);
	if (rc)
		goto err_close;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(VIRTIO_IRQ, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);

	return rc;
}
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}
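/*
 * Handshake summary, mirroring the code above (the order in which
 * messages reach the vhost-user slave at device init):
 *
 *	SET_OWNER
 *	GET_FEATURES
 *	GET_PROTOCOL_FEATURES / SET_PROTOCOL_FEATURES	(if negotiated)
 *	SET_SLAVE_REQ_FD				(if negotiated)
 *
 * Memory and ring setup (SET_MEM_TABLE, SET_VRING_*) happens later,
 * from vu_find_vqs().
 */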
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}

	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}
static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;

	return 0;
}
static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);
	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
				&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}
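/*
 * Rough picture of what is (and isn't) advertised above; the boundaries
 * are illustrative, the real values come from as-layout.h at runtime:
 *
 *	[0, reserved)			kernel text/data, NOT file-backed,
 *					hidden from the slave
 *	[reserved, physmem_size)	mmap()ed from the physmem file,
 *					region 0 of the mem table
 *	[__pa(end_iomem), +highmem)	optional highmem, region 1
 */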
static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}
static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}
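/*
 * Per the vhost-user specification, the low bits of the u64 payload of
 * SET_VRING_KICK/CALL carry the ring index, and a separate "no FD" flag
 * bit tells the slave that no file descriptor accompanies the message,
 * so it must fall back to message-based signaling instead of eventfd.
 * VHOST_USER_VRING_INDEX_MASK and VHOST_USER_VRING_POLL_MASK (from
 * vhost_user.h) encode exactly that split above.
 */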
static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}
/* Virtio interface */

static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);

	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}
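/*
 * Note on the two kick paths above: with
 * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS negotiated there is no
 * kick eventfd (kick_fd == -1), so the guest notifies the slave with an
 * explicit VHOST_USER_VRING_KICK message on the control socket; in the
 * classic mode it simply writes an 8-byte counter value to the kick FD.
 */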
static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}
static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		um_free_irq(VIRTIO_IRQ, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}
static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(VIRTIO_IRQ, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(VIRTIO_IRQ, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void vu_defer_irq_handle(struct time_travel_event *d)
{
	struct virtio_uml_vq_info *info;

	info = container_of(d, struct virtio_uml_vq_info, defer);

	info->callback(info->vq);
}

static void vu_defer_irq_callback(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	time_travel_add_irq_event(&info->defer);
}
#endif
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	/*
	 * When we get an interrupt, we must bounce it through the simulation
	 * calendar (the simtime device), except for the simtime device itself
	 * since that's part of the simulation control.
	 */
	if (time_travel_mode == TT_MODE_EXTERNAL && callback) {
		info->callback = callback;
		callback = vu_defer_irq_callback;
		time_travel_set_event_fn(&info->defer, vu_defer_irq_handle);
	}
#endif

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	info->vq = vq;
#endif

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(VIRTIO_IRQ, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}
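/*
 * Per-ring bring-up order used above, summarizing the calls in
 * vu_setup_vq() and vu_find_vqs() below:
 *
 *	1. allocate kick eventfd (unless in-band notifications are used)
 *	2. allocate call pipe + IRQ (unless in-band + slave-req are used)
 *	3. SET_VRING_NUM, SET_VRING_BASE, SET_VRING_ADDR
 *	4. later, from vu_find_vqs(): SET_VRING_KICK, then SET_VRING_ENABLE
 */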
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}
static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};
static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(VIRTIO_IRQ, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}
/* Platform device */

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata)
		return -EINVAL;

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc) {
		/*
		 * The release callback (which frees vu_dev and closes its
		 * FDs) runs from put_device(), so don't touch vu_dev after
		 * this point; the original code set ->registered afterwards,
		 * which was a use-after-free on this failure path.
		 */
		put_device(&vu_dev->vdev.dev);
		return rc;
	}
	vu_dev->registered = 1;
	return 0;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}
static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data,
			     conn_broken_wk);

	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}
static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);
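/*
 * Because the parameter is registered with device_param_cb() and
 * S_IRUSR, the configured devices can be read back through sysfs,
 * one "<socket>:<virtio_id>:<platform_id>" line per device, e.g.
 * (illustrative output only):
 *
 *	# cat /sys/module/virtio_uml/parameters/device
 *	/var/uml.socket:1:0
 */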
static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);
MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");