// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
		   readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
		   readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
		   readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
		   readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
		   readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
		   readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
		   readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
		   readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
			   readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
		   readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
		   readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
		   readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
		   readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
		   readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
		   readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
			   readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
			      struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file_unsafe("divisor", 0644, dir, port,
				   &ssi_sst_div_fops);

	return 0;
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port,
				 errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

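/*
 * Claim a free GDD (SSI DMA) logical channel for @msg. A channel is
 * considered free when its gdd_trn slot has no message attached. Returns
 * the channel index, or -EBUSY when all SSI_MAX_GDD_LCH channels are in
 * use (the caller then falls back to PIO).
 */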
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

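/*
 * Program GDD logical channel @lch for a single-entry scatterlist
 * transfer (ssi_async() rejects longer sg lists): map the buffer for
 * DMA, set up source/destination parameters and the per-channel hardware
 * sync, enable the block/timeout interrupts and finally kick the channel
 * via CCR. A runtime PM reference is taken here for the duration of the
 * transfer; it is dropped again once the transfer winds down (completion
 * or cleanup/flush).
 */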
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		/* dma_map_sg() returns the number of mapped entries, 0 on error */
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
			SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
			SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
		       gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

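/*
 * Start a PIO transfer by arming the DATAACCEPT (write) or DATAAVAILABLE
 * (read) interrupt for the channel; the actual word-by-word copy happens
 * in ssi_pio_complete(). Writes take an extra clock reference that is
 * only released once the final frame has actually been sent.
 */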
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
		msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

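/*
 * Start the message at the head of @queue, if any: use DMA when the
 * transfer is longer than one 32-bit word and a GDD channel can be
 * claimed, otherwise fall back to PIO. Called with the port lock held.
 */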
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
			    SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
		       omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

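/*
 * Entry point for hsi_async(): break frames are handled separately,
 * everything else is appended to the per-channel TX/RX queue and the
 * transfer is started immediately if the queue was idle.
 */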
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
		msg->status, msg->ttype, msg->channel);

	return err;
}

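/*
 * Compute the SST divisor for the requested max_speed. The TX reference
 * clock is half the functional clock; the decrement before the division
 * makes the result round to the next lower divisor when the rates divide
 * evenly, assuming the hardware divides the TX clock by (divisor + 1).
 */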
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			 msg->channel, msg, msg->sgt.sgl->length,
			 msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

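/*
 * Apply a client's TX/RX configuration: both directions are put into
 * SLEEP mode while the framesize, divisor, channel count, arbitration
 * and mode registers are reprogrammed. A shadow copy of the new settings
 * is kept in omap_port so the context can be restored after an OFF-mode
 * transition.
 */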
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
			cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
	    (cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

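/*
 * Hard flush of the port: idle the pins to stop traffic, cancel any
 * in-flight GDD transfer, drain the SST/SSR buffer state, ack all
 * pending interrupts and drop every queued message. Flushed messages are
 * not completed; their destructor is invoked instead (see
 * ssi_flush_queue()).
 */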
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
		       omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
			container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

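/*
 * Wake-line handling is refcounted: the first ssi_start_tx() schedules
 * start_tx_work() to raise the wake line (pm_runtime_get_sync() can
 * sleep, which is presumably why this is deferred to a workqueue), and
 * only the last ssi_stop_tx() lowers it again and releases the clocks.
 */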
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

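/*
 * Keep (re)starting the head of @queue until a transfer actually starts
 * or the queue runs empty: a message that fails to start is completed
 * with HSI_STATUS_ERROR and removed, so one bad message cannot stall the
 * queue behind it.
 */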
static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

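/*
 * Per-client cleanup on release: flush the client's messages from the
 * break/TX/RX queues, release clock references held by in-flight writes,
 * clear the corresponding SST/SSR buffer state and disarm/ack the
 * interrupts that were armed on behalf of this client.
 */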
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
				       link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references for writes, GDD ones included */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
				       link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		       SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

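/*
 * Handle an SSI error interrupt: ack the error condition, cancel all GDD
 * and PIO read transfers, complete the pending read at the head of each
 * RX queue with HSI_STATUS_ERROR and restart whatever reads remain
 * queued behind it.
 */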
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
		       omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
				       link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
	       omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

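/*
 * PIO engine, called per DATAACCEPT/DATAAVAILABLE interrupt: move one
 * 32-bit word between the message buffer and the channel FIFO register.
 * A completed write is only reported once the interrupt fires again
 * after the final word, i.e. when the last frame has really been sent.
 */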
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
			       SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
				     SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
			msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
		    ((msg->status == HSI_STATUS_COMPLETED) &&
		     (msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
			       SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

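/*
 * CAWAKE gpio interrupt handler: translate wake-line edges into
 * HSI_EVENT_START_RX/HSI_EVENT_STOP_RX events and hold a runtime PM
 * reference while the remote end keeps the line high. The SSI_WAKE_EN
 * bit guards the reference count against missed or duplicated edges.
 */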
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
			       omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
			       omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0)
		return err;
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
					ssi_pio_thread, IRQF_ONESHOT,
					"SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
			omap_port->irq, err);

	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
					ssi_wake_thread,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					"SSI cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
			cawake_irq, err);
		/* Don't try to enable wakeup on an IRQ we failed to request */
		return err;
	}
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device,
			"Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
			      const char *name, void __iomem **pbase,
			      dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					 resource_size(mem),
					 dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
			mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

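/*
 * Port probe: attach this platform device to the first uninitialized
 * port slot of the parent SSI controller, grab the cawake gpio, map the
 * "tx" (SST) and "rx" (SSR) register regions, request the port and wake
 * interrupts, and enable runtime PM with a 250 ms autosuspend delay
 * before registering the DT child clients.
 */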
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
	     port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	/* The error queue must be a valid (empty) list before use */
	INIT_LIST_HEAD(&omap_port->errqueue);
	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async = ssi_async;
	port->setup = ssi_setup;
	port->flush = ssi_flush;
	port->start_tx = ssi_start_tx;
	port->stop_tx = ssi_stop_tx;
	port->release = ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
				 &omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
				 &omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async = hsi_dummy_msg;
	port->setup = hsi_dummy_cl;
	port->flush = hsi_dummy_cl;
	port->start_tx = hsi_dummy_cl;
	port->stop_tx = hsi_dummy_cl;
	port->release = hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
		       omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					  SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
		       omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
		       omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
		       omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

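/*
 * Runtime PM: suspend puts both directions into SLEEP mode and takes a
 * snapshot of the port context. Resume restores the full context only
 * when the context-loss counter indicates the hardware state was lost;
 * the mode and TX divisor are always reprogrammed.
 */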
static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
			   omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove = ssi_port_remove,
	.driver = {
		.name = "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm = DEV_PM_OPS,
	},
};