ep0.c

// SPDX-License-Identifier: GPL-2.0
/**
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Taken from Linux Kernel v3.19-rc1 (drivers/usb/dwc3/ep0.c) and ported
 * to uboot.
 *
 * commit c00552ebaf : Merge 3.18-rc7 into usb-next
 */

#include <common.h>
#include <cpu_func.h>
#include <linux/kernel.h>
#include <linux/list.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#include "linux-compat.h"

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req);

static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
	switch (state) {
	case EP0_UNCONNECTED:
		return "Unconnected";
	case EP0_SETUP_PHASE:
		return "Setup Phase";
	case EP0_DATA_PHASE:
		return "Data Phase";
	case EP0_STATUS_PHASE:
		return "Status Phase";
	default:
		return "UNKNOWN";
	}
}

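/*
 * dwc3_ep0_start_trans - set up an ep0 TRB and start the transfer
 * @dwc: pointer to our controller context structure
 * @epnum: physical endpoint number (0 for OUT, 1 for IN)
 * @buf_dma: DMA address of the buffer to transfer
 * @len: transfer length in bytes
 * @type: TRB control type (SETUP, DATA or STATUS)
 * @chain: non-zero if this TRB is chained to a following one
 *
 * Fills the next free slot of the shared ep0 TRB, flushes buffer and TRB
 * from the data cache and, unless the TRB is chained, issues the
 * Start Transfer endpoint command.
 */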
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
		u32 len, u32 type, unsigned chain)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_trb *trb;
	struct dwc3_ep *dep;
	int ret;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_BUSY) {
		dev_vdbg(dwc->dev, "%s still busy", dep->name);
		return 0;
	}

	trb = &dwc->ep0_trb[dep->free_slot];

	if (chain)
		dep->free_slot++;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_ISP_IMI);

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else
		trb->ctrl |= (DWC3_TRB_CTRL_IOC
				| DWC3_TRB_CTRL_LST);

	dwc3_flush_cache((uintptr_t)buf_dma, len);
	dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));

	if (chain)
		return 0;

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "%s STARTTRANSFER failed", dep->name);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}

static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
		else
			dev_dbg(dwc->dev, "too early for delayed status");

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase. Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}

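/*
 * dwc3_gadget_ep0_queue - usb_ep_ops .queue implementation for ep0
 *
 * Checks that the endpoint is still enabled and that no other request is
 * pending (ep0 OUT and IN share a single TRB), then hands the request to
 * __dwc3_gadget_ep0_queue() under the controller lock.
 */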
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s",
				request, dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->request_list)) {
		ret = -EBUSY;
		goto out;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d state '%s'",
			request, dep->name, request->length,
			dwc3_ep0_state_string(dwc->ep0state));

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

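/*
 * dwc3_ep0_stall_and_restart - protocol-stall ep0 and rearm for SETUP
 *
 * Issues SetStall on physical endpoint 0, gives back any request still
 * sitting on the queue with -ECONNRESET and queues a new SETUP TRB so the
 * next control transfer can start cleanly.
 */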
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags = DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1, false);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->request_list)) {
		struct dwc3_request *req;

		req = next_request(&dep->request_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}

int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep0_set_halt(ep, value);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

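/*
 * dwc3_ep0_out_start - queue an 8-byte SETUP TRB on physical endpoint 0
 * so the controller can receive the next SETUP packet from the host.
 */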
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	int ret;

	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP, 0);
	WARN_ON(ret < 0);
}

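/*
 * dwc3_wIndex_to_dep - translate a wIndex endpoint selector into the
 * matching physical endpoint. Returns NULL if that endpoint is not enabled.
 */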
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep *dep;
	u32 windex = le16_to_cpu(wIndex_le);
	u32 epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}

/*
 * ch 9.4.5
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 reg;
	u16 usb_status = 0;
	__le16 *response_pkt;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;

		if (dwc->speed == DWC3_DSTS_SUPERSPEED) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		}

		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

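/*
 * dwc3_ep0_handle_feature - handle SET_FEATURE/CLEAR_FEATURE requests
 * @set: non-zero for SET_FEATURE, zero for CLEAR_FEATURE
 *
 * Handles U1/U2 enable, TEST_MODE and REMOTE_WAKEUP for the device
 * recipient, function suspend for interfaces and ENDPOINT_HALT for
 * endpoints.
 */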
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 wValue;
	u32 wIndex;
	u32 reg;
	int ret;
	enum usb_device_state state;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;
	state = dwc->gadget.state;

	switch (recip) {
	case USB_RECIP_DEVICE:
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			break;
		/*
		 * 9.4.1 says only for SS, in AddressState only for
		 * default control pipe
		 */
		case USB_DEVICE_U1_ENABLE:
			if (state != USB_STATE_CONFIGURED)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;

			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (set)
				reg |= DWC3_DCTL_INITU1ENA;
			else
				reg &= ~DWC3_DCTL_INITU1ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
			break;

		case USB_DEVICE_U2_ENABLE:
			if (state != USB_STATE_CONFIGURED)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;

			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (set)
				reg |= DWC3_DCTL_INITU2ENA;
			else
				reg &= ~DWC3_DCTL_INITU2ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
			break;

		case USB_DEVICE_LTM_ENABLE:
			return -EINVAL;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;

			if (!set)
				return -EINVAL;

			dwc->test_mode_nr = wIndex >> 8;
			dwc->test_mode = true;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_INTERFACE:
		switch (wValue) {
		case USB_INTRF_FUNC_SUSPEND:
			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
				/* XXX enable Low power suspend */
				;
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
				/* XXX enable remote wakeup */
				;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_ENDPOINT:
		switch (wValue) {
		case USB_ENDPOINT_HALT:
			dep = dwc3_wIndex_to_dep(dwc, wIndex);
			if (!dep)
				return -EINVAL;

			if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
				break;

			ret = __dwc3_gadget_ep_set_halt(dep, set, true);
			if (ret)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

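/*
 * dwc3_ep0_set_address - handle SET_ADDRESS
 *
 * Programs the new device address into DCFG and moves the gadget to
 * USB_STATE_ADDRESS (or back to USB_STATE_DEFAULT for address zero).
 */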
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget.state;
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_dbg(dwc->dev, "invalid device address %d", addr);
		return -EINVAL;
	}

	if (state == USB_STATE_CONFIGURED) {
		dev_dbg(dwc->dev, "trying to set address when configured");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		usb_gadget_set_state(&dwc->gadget, USB_STATE_ADDRESS);
	else
		usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	return 0;
}

static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);

	return ret;
}

static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget.state;
	u32 cfg;
	int ret;
	u32 reg;

	dwc->start_config_issued = false;
	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_DEFAULT:
		return -EINVAL;

	case USB_STATE_ADDRESS:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
			/*
			 * only change state if set_config has already
			 * been processed. If gadget driver returns
			 * USB_GADGET_DELAYED_STATUS, we will wait
			 * to change the state on the next usb_ep_queue()
			 */
			if (ret == 0)
				usb_gadget_set_state(&dwc->gadget,
						USB_STATE_CONFIGURED);

			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);

			dwc->resize_fifos = true;
			dev_dbg(dwc->dev, "resize FIFOs flag SET");
		}
		break;

	case USB_STATE_CONFIGURED:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg && !ret)
			usb_gadget_set_state(&dwc->gadget,
					USB_STATE_ADDRESS);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

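/*
 * dwc3_ep0_set_sel_cmpl - completion handler for the SET_SEL data stage
 *
 * Caches the U1/U2 SEL and PEL values received from the host and passes
 * the relevant PEL to the controller via the Set Periodic Parameters
 * generic command (clamped to zero when above 125, per the Databook).
 */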
static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	u32 param = 0;
	u32 reg;

	struct timing {
		u8 u1sel;
		u8 u1pel;
		u16 u2sel;
		u16 u2pel;
	} __packed timing;

	int ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}

static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	enum usb_device_state state = dwc->gadget.state;
	u16 wLength;

	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
	 * queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);
	wIndex = le16_to_cpu(ctrl->wIndex);

	if (wIndex || wLength)
		return -EINVAL;

	/*
	 * REVISIT It's unclear from Databook what to do with this
	 * value. For now, just cache it.
	 */
	dwc->isoch_delay = wValue;

	return 0;
}

static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS");
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS");
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION");
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		dev_vdbg(dwc->dev, "USB_REQ_SET_SEL");
		ret = dwc3_ep0_set_sel(dwc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY");
		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
		break;
	default:
		dev_vdbg(dwc->dev, "Forwarding to gadget driver");
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}

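/*
 * dwc3_ep0_inspect_setup - decode a freshly received SETUP packet
 *
 * Works out whether this is a two- or three-stage control transfer and
 * which direction the data stage takes, then dispatches standard requests
 * locally and delegates everything else to the gadget driver. Any error
 * stalls ep0 and restarts the SETUP phase.
 */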
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
	int ret = -EINVAL;
	u32 len;

	if (!dwc->gadget_driver)
		goto out;

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}

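/*
 * dwc3_ep0_complete_data - handle XferComplete for the DATA phase
 *
 * Works out how many bytes were actually transferred, copying data out of
 * the bounce buffer used for non-wMaxPacketSize-aligned OUT transfers,
 * then gives the request back to the gadget driver. A short IN transfer
 * stalls ep0; an aligned transfer with the zero flag set gets an extra
 * zero-length DATA TRB.
 */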
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r = NULL;
	struct usb_request *ur;
	struct dwc3_trb *trb;
	struct dwc3_ep *ep0;
	unsigned transfer_size = 0;
	unsigned maxp;
	void *buf;
	u32 transferred = 0;
	u32 status;
	u32 length;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	trb = dwc->ep0_trb;

	r = next_request(&ep0->request_list);
	if (!r)
		return;

	dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dev_dbg(dwc->dev, "Setup Pending received");

		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;
	buf = ur->buf;

	length = trb->size & DWC3_TRB_SIZE_MASK;
	maxp = ep0->endpoint.maxpacket;

	if (dwc->ep0_bounced) {
		/*
		 * Handle the first TRB before handling the bounce buffer if
		 * the request length is greater than the bounce buffer size.
		 */
		if (ur->length > DWC3_EP0_BOUNCE_SIZE) {
			transfer_size = (ur->length / maxp) * maxp;
			transferred = transfer_size - length;
			buf = (u8 *)buf + transferred;
			ur->actual += transferred;

			trb++;
			dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));
			length = trb->size & DWC3_TRB_SIZE_MASK;

			ep0->free_slot = 0;
		}

		transfer_size = roundup((ur->length - transfer_size),
					maxp);
		transferred = min_t(u32, ur->length - transferred,
				    transfer_size - length);
		dwc3_flush_cache((uintptr_t)dwc->ep0_bounce, DWC3_EP0_BOUNCE_SIZE);
		memcpy(buf, dwc->ep0_bounce, transferred);
	} else {
		transferred = ur->length - length;
	}

	ur->actual += transferred;

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */
		dwc3_ep0_stall_and_restart(dwc);
	} else {
		dwc3_gadget_giveback(ep0, r, 0);

		if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
		    ur->length && ur->zero) {
			int ret;

			dwc->ep0_next_event = DWC3_EP0_COMPLETE;

			ret = dwc3_ep0_start_trans(dwc, epnum,
					dwc->ctrl_req_addr, 0,
					DWC3_TRBCTL_CONTROL_DATA, 0);
			WARN_ON(ret < 0);
		}
	}
}

static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;
	struct dwc3_trb *trb;
	u32 status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	if (!list_empty(&dep->request_list)) {
		r = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_dbg(dwc->dev, "Invalid Test #%d",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dev_dbg(dwc->dev, "Setup Pending received");

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_BUSY;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	invalidate_dcache_range((uintptr_t)dwc->ctrl_req,
				(uintptr_t)dwc->ctrl_req +
				ROUND(sizeof(*dwc->ctrl_req), CACHELINE_SIZE));

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dev_vdbg(dwc->dev, "Setup Phase");
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dev_vdbg(dwc->dev, "Data Phase");
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dev_vdbg(dwc->dev, "Status Phase");
		dwc3_ep0_complete_status(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

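/*
 * __dwc3_ep0_do_control_data - start the DATA phase on ep0
 *
 * Maps the request for DMA and starts a CONTROL_DATA transfer. OUT
 * transfers whose length is not wMaxPacketSize aligned are bounced
 * through dwc->ep0_bounce, with a chained TRB covering the aligned part
 * of larger requests.
 */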
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	int ret;

	req->direction = !!dep->number;

	if (req->request.length == 0) {
		ret = dwc3_ep0_start_trans(dwc, dep->number,
				dwc->ctrl_req_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA, 0);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
		   (dep->number == 0)) {
		u32 transfer_size = 0;
		u32 maxpacket;

		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				dep->number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		maxpacket = dep->endpoint.maxpacket;
		if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
			transfer_size = (req->request.length / maxpacket) *
					maxpacket;
			ret = dwc3_ep0_start_trans(dwc, dep->number,
					req->request.dma,
					transfer_size,
					DWC3_TRBCTL_CONTROL_DATA, 1);
		}

		transfer_size = roundup((req->request.length - transfer_size),
					maxpacket);

		dwc->ep0_bounced = true;

		/*
		 * REVISIT in case request length is bigger than
		 * DWC3_EP0_BOUNCE_SIZE we will need two chained
		 * TRBs to handle the transfer.
		 */
		ret = dwc3_ep0_start_trans(dwc, dep->number,
				dwc->ep0_bounce_addr, transfer_size,
				DWC3_TRBCTL_CONTROL_DATA, 0);
	} else {
		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				dep->number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
				req->request.length,
				DWC3_TRBCTL_CONTROL_DATA, 0);
	}

	WARN_ON(ret < 0);
}

static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
				      : DWC3_TRBCTL_CONTROL_STATUS2;

	return dwc3_ep0_start_trans(dwc, dep->number,
			dwc->ctrl_req_addr, 0, type, 0);
}

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	if (dwc->resize_fifos) {
		dev_dbg(dwc->dev, "Resizing FIFOs");
		dwc3_gadget_resize_tx_fifos(dwc);
		dwc->resize_fifos = 0;
	}

	WARN_ON(dwc3_ep0_start_control_status(dep));
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}

static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	if (!dep->resource_index)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
}

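/*
 * dwc3_ep0_xfernotready - handle XferNotReady on ep0
 *
 * For the DATA stage this only checks that the host moved in the
 * direction we expected (stalling otherwise, since the DATA TRB is
 * already queued); for the STATUS stage it starts the CONTROL_STATUS
 * transfer unless the gadget driver asked for a delayed status.
 */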
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc->setup_packet_pending = true;

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		dev_vdbg(dwc->dev, "Control Data");

		/*
		 * We already have a DATA transfer in the controller's cache,
		 * if we receive a XferNotReady(DATA) we will ignore it, unless
		 * it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];

			dev_vdbg(dwc->dev, "Wrong direction for Data phase");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		dev_vdbg(dwc->dev, "Control Status");

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			WARN_ON_ONCE(event->endpoint_number != 1);
			dev_vdbg(dwc->dev, "Delayed Status");
			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}

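/*
 * dwc3_ep0_interrupt - ep0 endpoint event dispatcher
 *
 * Called from the main gadget interrupt handling for events on the two
 * physical control endpoints; only XferComplete and XferNotReady are
 * acted upon here.
 */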
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	u8 epnum = event->endpoint_number;

	dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'",
			dwc3_ep_event_string(event->endpoint_event),
			epnum >> 1, (epnum & 1) ? "in" : "out",
			dwc3_ep0_state_string(dwc->ep0state));

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}