pvcalls-back.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

static struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);
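/*
 * Copy data received on the kernel socket into the shared "in" data ring:
 * read the ring indexes, recvmsg (non-blocking) into the free space, then
 * publish the new in_prod (or in_error) and kick the frontend's event
 * channel. Returns false when the ring is in error or already full.
 */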
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				       flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}
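/*
 * Counterpart of pvcalls_conn_back_read for the "out" ring: sendmsg
 * (non-blocking) whatever the frontend has queued, advance out_cons and
 * notify the frontend. On -EAGAIN or a partial send the write/io counters
 * are re-armed so the ioworker tries again.
 */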
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
	}

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}
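/*
 * Per-active-socket work item: loop while I/O is pending, servicing reads
 * and writes, and issue the delayed EOI for the connection event channel
 * once no further writes are outstanding.
 */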
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}
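/*
 * PVCALLS_SOCKET: only AF_INET SOCK_STREAM is supported; the kernel
 * socket itself is created later, at connect or bind time.
 */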
static int pvcalls_back_socket(struct xenbus_device *dev,
		struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}
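/*
 * Common setup for a new active socket (connect or accept): map the
 * indexes page and data ring granted by the frontend, bind the
 * per-connection event channel, start the ioworker and install the
 * sk_data_ready/sk_state_change callbacks.
 */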
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev->otherend_id, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}
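/*
 * PVCALLS_CONNECT: create an AF_INET stream socket, connect it to the
 * address supplied by the frontend and wrap it in an active socket
 * mapping.
 */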
static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}
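/*
 * Tear down an active socket: restore the saved callbacks, stop the
 * ioworker, unmap the shared rings, unbind the event channel and free
 * the mapping.
 */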
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}
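/*
 * PVCALLS_RELEASE: the id may name either an active or a passive socket,
 * so try the active list first and fall back to the passive radix tree.
 */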
static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}
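/*
 * Deferred accept work, run from the passive socket's workqueue: accept a
 * pending connection (non-blocking), turn it into an active socket and
 * push the PVCALLS_ACCEPT response to the command ring.
 */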
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}
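/*
 * data_ready callback of a listening socket: if a PVCALLS_POLL request is
 * outstanding, answer it right away; otherwise schedule the accept work.
 */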
static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}
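/*
 * PVCALLS_BIND: create and bind the listening-side kernel socket and track
 * it in the passive-socket radix tree, keyed by the frontend-chosen id.
 */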
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}
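/*
 * PVCALLS_ACCEPT: only one accept or poll may be outstanding per listening
 * socket; stash the request and let the workqueue complete it, returning
 * -1 so that no response is pushed yet.
 */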
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}
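/*
 * PVCALLS_POLL: reply immediately if a connection is already queued on the
 * listening socket, otherwise stash the request and answer later from
 * pvcalls_pass_sk_data_ready.
 */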
static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}
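/*
 * Dispatch a single command ring request. A non-zero return tells
 * pvcalls_back_work not to push a response yet (accept/poll).
 */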
static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}
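/*
 * Threaded handler for the command ring event channel: drain all pending
 * requests, then issue the delayed EOI.
 */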
static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}
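/*
 * Frontend announced itself: read the event channel and ring reference it
 * published on xenstore, bind the irq, map the command ring and add the
 * frontend to the global list.
 */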
static int backend_connect(struct xenbus_device *dev)
{
	int err;
	evtchn_port_t evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}
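/*
 * Step the xenbus state machine until the backend reaches the requested
 * state, connecting or disconnecting from the frontend along the way.
 */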
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}
module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");