// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "stub.h"
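
/*
 * Helpers that inspect the setup packet of a control URB to recognize the
 * standard requests (clear_halt, set_interface, set_configuration, port
 * reset) that the stub driver must intercept and run on the server side.
 */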

static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
	       (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
	       (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
	       (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 value;
	__u16 index;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	value = le16_to_cpu(req->wValue);
	index = le16_to_cpu(req->wIndex);

	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
	    (req->bRequestType == USB_RT_PORT) &&
	    (value == USB_PORT_FEAT_RESET)) {
		usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
		return 1;
	} else
		return 0;
}
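
/*
 * The tweak_* handlers below carry out an intercepted request on the server
 * side through the corresponding USB core API before the URB is submitted,
 * so that server-side state (halted endpoints, altsettings, configuration,
 * port reset) stays consistent with what the client expects.
 */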

static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	int target_endp;
	int target_dir;
	int target_pipe;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	/*
	 * The stalled endpoint is specified in the wIndex value. The endpoint
	 * of the urb is the target of this clear_halt request (i.e., control
	 * endpoint).
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* Is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
	target_dir = le16_to_cpu(req->wIndex) & 0x0080;

	if (target_dir)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_clear_halt error: devnum %d endp %d ret %d\n",
			urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_clear_halt done: devnum %d endp %d\n",
			 urb->dev->devnum, target_endp);

	return ret;
}

static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 alternate;
	__u16 interface;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	alternate = le16_to_cpu(req->wValue);
	interface = le16_to_cpu(req->wIndex);

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_set_interface error: inf %u alt %u ret %d\n",
			interface, alternate, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_set_interface done: inf %u alt %u\n",
			 interface, alternate);

	return ret;
}
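
/*
 * Note: this handler always returns 0. A usb_set_configuration() failure
 * other than -ENODEV is only logged here and is not propagated back through
 * the intercepted URB.
 */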

static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	struct usb_ctrlrequest *req;
	__u16 config;
	int err;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	err = usb_set_configuration(sdev->udev, config);
	if (err && err != -ENODEV)
		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
			config, err);
	return 0;
}

static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}

/*
 * clear_halt, set_interface, set_configuration and reset_device require
 * special tricks.
 */
static void tweak_special_requests(struct urb *urb)
{
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		tweak_clear_halt_cmd(urb);
	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		tweak_set_interface_cmd(urb);
	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		tweak_set_configuration_cmd(urb);
	else if (is_reset_device_cmd(urb))
		tweak_reset_device_cmd(urb);
	else
		usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret, i;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., it is still
		 * in flight in the usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * While the unlinking flag is set, priv->seqnum is changed
		 * from the seqnum of the urb being cancelled to the seqnum
		 * of the unlink request. This will be used to build the
		 * result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is called outside the spinlock to avoid
		 * spinlock recursion, since stub_complete() is sometimes
		 * called in this context rather than in interrupt context.
		 * If stub_complete() runs before we call usb_unlink_urb(),
		 * usb_unlink_urb() returns an error. In that case stub_tx
		 * still sends the result pdu of this unlink request, even
		 * though the submission completed and no actual unlinking
		 * was performed. urb->status is then not -ECONNRESET, so
		 * the driver on the client host can recognize that the
		 * unlink request failed.
		 */
		for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
			ret = usb_unlink_urb(priv->urbs[i]);
			if (ret != -EINPROGRESS)
				dev_err(&priv->urbs[i]->dev->dev,
					"failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
					i + 1, priv->num_urbs,
					priv->seqnum, ret);
		}
		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in the priv_init queue.
	 * It has already completed and its result is (or was) going to be
	 * sent by a CMD_RET pdu. In this case, usb_unlink_urb() is not
	 * needed. We only report the completion of this unlink request back
	 * to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
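
/*
 * A pdu is accepted only when it addresses this exported device (devid
 * matches) and the device is currently in use by a client (SDEV_ST_USED).
 */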

static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid = 0;

	if (pdu->base.devid == sdev->devid) {
		spin_lock_irq(&ud->lock);
		if (ud->status == SDEV_ST_USED) {
			/* A request is valid. */
			valid = 1;
		}
		spin_unlock_irq(&ud->lock);
	}

	return valid;
}

static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}
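
/*
 * Map the endpoint/direction carried in the pdu header to a kernel pipe
 * value, checking that the endpoint number is in range, that the endpoint
 * exists, and (for isochronous endpoints) that the number of packets is
 * sane. Returns -1 on any error.
 */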

static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;
	int epnum = pdu->base.ep;
	int dir = pdu->base.direction;

	if (epnum < 0 || epnum > 15)
		goto err_ret;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep)
		goto err_ret;

	epd = &ep->desc;

	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		/* validate number of packets */
		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets >
		    USBIP_MAX_ISO_PACKETS) {
			dev_err(&sdev->udev->dev,
				"CMD_SUBMIT: isoc invalid num packets %d\n",
				pdu->u.cmd_submit.number_of_packets);
			return -1;
		}
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

err_ret:
	/* reached only via goto, for an invalid or missing endpoint */
	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
	return -1;
}
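
/*
 * Filter the client-supplied transfer_flags down to the set that is safe
 * to pass on; this mirrors the URB sanity checking that the USB core
 * performs in usb_submit_urb() for in-kernel drivers.
 */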

static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			 !setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		fallthrough;
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}
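
/*
 * Receive the client's transfer buffer data from the TCP socket into each
 * URB of this request (usbip_recv_xbuff() itself decides, based on the
 * transfer direction, whether there is payload to read).
 */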

static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
{
	int ret = 0;
	int i;

	for (i = 0; i < priv->num_urbs; i++) {
		ret = usbip_recv_xbuff(ud, priv->urbs[i]);
		if (ret < 0)
			break;
	}

	return ret;
}
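
/*
 * Handle a CMD_SUBMIT pdu: allocate stub_priv bookkeeping and transfer
 * buffers, build one URB (or several, when a scatter-gather request must
 * be split for an HCD without SG support), receive the payload, then
 * submit the URB(s) to the device.
 */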

static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	struct scatterlist *sgl = NULL, *sg;
	void *buffer = NULL;
	unsigned long long buf_len;
	int nents;
	int num_urbs = 1;
	int pipe = get_pipe(sdev, pdu);
	int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
	int support_sg = 1;
	int np = 0;
	int ret, i;

	if (pipe == -1)
		return;

	/*
	 * Smatch reported the error case where use_sg is true and buf_len is
	 * zero. In that case, SDEV_EVENT_ERROR_MALLOC is raised, stub_priv
	 * is released by the stub event handler, and the connection is shut
	 * down.
	 */
	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;

	if (use_sg && !buf_len) {
		dev_err(&udev->dev, "sg buffer with zero length\n");
		goto err_malloc;
	}

	/* allocate urb transfer buffer, if needed */
	if (buf_len) {
		if (use_sg) {
			sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
			if (!sgl)
				goto err_malloc;

			/* Check if the server's HCD supports SG */
			if (!udev->bus->sg_tablesize) {
				/*
				 * If the server's HCD doesn't support SG,
				 * break a single SG request into several
				 * URBs and map each SG list entry to a
				 * corresponding URB buffer. The previously
				 * allocated SG list is stored in priv->sgl
				 * (if the server's HCD supports SG, the SG
				 * list is stored only in urb->sg) and is
				 * used as an indicator that the server
				 * split a single SG request into several
				 * URBs. Later, priv->sgl is used by
				 * stub_complete() and stub_send_ret_submit()
				 * to reassemble the divided URBs.
				 */
				support_sg = 0;
				num_urbs = nents;
				priv->completed_urbs = 0;
				pdu->u.cmd_submit.transfer_flags &=
					~URB_DMA_MAP_SG;
			}
		} else {
			buffer = kzalloc(buf_len, GFP_KERNEL);
			if (!buffer)
				goto err_malloc;
		}
	}

	/* allocate urb array */
	priv->num_urbs = num_urbs;
	priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
	if (!priv->urbs)
		goto err_urbs;

	/* setup a urb */
	if (support_sg) {
		if (usb_pipeisoc(pipe))
			np = pdu->u.cmd_submit.number_of_packets;

		priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
		if (!priv->urbs[0])
			goto err_urb;

		if (buf_len) {
			if (use_sg) {
				priv->urbs[0]->sg = sgl;
				priv->urbs[0]->num_sgs = nents;
				priv->urbs[0]->transfer_buffer = NULL;
			} else {
				priv->urbs[0]->transfer_buffer = buffer;
			}
		}

		/* copy urb setup packet */
		priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
						      8, GFP_KERNEL);
		if (!priv->urbs[0]->setup_packet) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}

		usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
	} else {
		for_each_sg(sgl, sg, nents, i) {
			priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
			/*
			 * The previously allocated URBs will be freed in
			 * stub_device_cleanup_urbs() if an error occurs.
			 */
			if (!priv->urbs[i])
				goto err_urb;

			usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
			priv->urbs[i]->transfer_buffer = sg_virt(sg);
			priv->urbs[i]->transfer_buffer_length = sg->length;
		}
		priv->sgl = sgl;
	}

	for (i = 0; i < num_urbs; i++) {
		/* set other members from the base header of pdu */
		priv->urbs[i]->context = (void *) priv;
		priv->urbs[i]->dev = udev;
		priv->urbs[i]->pipe = pipe;
		priv->urbs[i]->complete = stub_complete;

		/* no need to submit an intercepted request, but harmless? */
		tweak_special_requests(priv->urbs[i]);

		masking_bogus_flags(priv->urbs[i]);
	}

	if (stub_recv_xbuff(ud, priv) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
		return;

	/* urb is now ready to submit */
	for (i = 0; i < priv->num_urbs; i++) {
		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);

		if (ret == 0)
			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
					  pdu->base.seqnum);
		else {
			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
			usbip_dump_header(pdu);
			usbip_dump_urb(priv->urbs[i]);

			/*
			 * Pessimistic.
			 * This connection will be discarded.
			 */
			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
			break;
		}
	}

	usbip_dbg_stub_rx("Leave\n");
	return;

err_urb:
	kfree(priv->urbs);
err_urbs:
	kfree(buffer);
	sgl_free(sgl);
err_malloc:
	usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
}

/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
	int ret;
	struct usbip_header pdu;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->udev->dev;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	usbip_header_correct_endian(&pdu, 0);
	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	switch (pdu.base.command) {
	case USBIP_CMD_UNLINK:
		stub_recv_cmd_unlink(sdev, &pdu);
		break;

	case USBIP_CMD_SUBMIT:
		stub_recv_cmd_submit(sdev, &pdu);
		break;

	default:
		/* unknown command; treat it as a fatal protocol error */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		break;
	}
}
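
/*
 * Body of the stub_rx kernel thread: receive and dispatch pdus until an
 * event (error or exit) is raised or the thread is asked to stop. (The
 * thread itself is created when the device is exported; see stub_dev.c.)
 */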

int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		stub_rx_pdu(ud);
	}

	return 0;
}