  1. // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2. // Copyright(c) 2015-17 Intel Corporation.
  3. /*
4. * SoundWire Intel Master Driver
  5. */
  6. #include <linux/acpi.h>
  7. #include <linux/debugfs.h>
  8. #include <linux/delay.h>
  9. #include <linux/module.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/platform_device.h>
  13. #include <sound/pcm_params.h>
  14. #include <linux/pm_runtime.h>
  15. #include <sound/soc.h>
  16. #include <linux/soundwire/sdw_registers.h>
  17. #include <linux/soundwire/sdw.h>
  18. #include <linux/soundwire/sdw_intel.h>
  19. #include "cadence_master.h"
  20. #include "bus.h"
  21. #include "intel.h"
  22. #define INTEL_MASTER_SUSPEND_DELAY_MS 3000
  23. /*
  24. * debug/config flags for the Intel SoundWire Master.
  25. *
26. * Since we may have multiple masters active, each master gets its own
27. * byte of up to 8 flags, with master0 using the least-significant byte, etc.
  28. */
  29. #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME BIT(0)
  30. #define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP BIT(1)
  31. #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE BIT(2)
  32. #define SDW_INTEL_MASTER_DISABLE_MULTI_LINK BIT(3)
  33. static int md_flags;
  34. module_param_named(sdw_md_flags, md_flags, int, 0444);
  35. MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
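/*
 * Example (flag layout only): passing sdw_md_flags=0x0100 sets
 * SDW_INTEL_MASTER_DISABLE_PM_RUNTIME for master/link 1 and leaves
 * master/link 0 untouched, since each link extracts its flags with
 * md_flags >> (link_id * 8).
 */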
  36. /* Intel SHIM Registers Definition */
  37. #define SDW_SHIM_LCAP 0x0
  38. #define SDW_SHIM_LCTL 0x4
  39. #define SDW_SHIM_IPPTR 0x8
  40. #define SDW_SHIM_SYNC 0xC
  41. #define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
  42. #define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
  43. #define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
  44. #define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
  45. #define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
  46. #define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
  47. #define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
  48. #define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
  49. #define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x))
  50. #define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
  51. #define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
  52. #define SDW_SHIM_WAKEEN 0x190
  53. #define SDW_SHIM_WAKESTS 0x192
  54. #define SDW_SHIM_LCTL_SPA BIT(0)
  55. #define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0)
  56. #define SDW_SHIM_LCTL_CPA BIT(8)
  57. #define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8)
  58. #define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
  59. #define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
  60. #define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
  61. #define SDW_SHIM_SYNC_SYNCCPU BIT(15)
  62. #define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
  63. #define SDW_SHIM_SYNC_CMDSYNC BIT(16)
  64. #define SDW_SHIM_SYNC_SYNCGO BIT(24)
  65. #define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0)
  66. #define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4)
  67. #define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8)
  68. #define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0)
  69. #define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4)
  70. #define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8)
  71. #define SDW_SHIM_PCMSYCM_DIR BIT(15)
  72. #define SDW_SHIM_PDMSCAP_ISS GENMASK(3, 0)
  73. #define SDW_SHIM_PDMSCAP_OSS GENMASK(7, 4)
  74. #define SDW_SHIM_PDMSCAP_BSS GENMASK(12, 8)
  75. #define SDW_SHIM_PDMSCAP_CPSS GENMASK(15, 13)
  76. #define SDW_SHIM_IOCTL_MIF BIT(0)
  77. #define SDW_SHIM_IOCTL_CO BIT(1)
  78. #define SDW_SHIM_IOCTL_COE BIT(2)
  79. #define SDW_SHIM_IOCTL_DO BIT(3)
  80. #define SDW_SHIM_IOCTL_DOE BIT(4)
  81. #define SDW_SHIM_IOCTL_BKE BIT(5)
  82. #define SDW_SHIM_IOCTL_WPDD BIT(6)
  83. #define SDW_SHIM_IOCTL_CIBD BIT(8)
  84. #define SDW_SHIM_IOCTL_DIBD BIT(9)
  85. #define SDW_SHIM_CTMCTL_DACTQE BIT(0)
  86. #define SDW_SHIM_CTMCTL_DODS BIT(1)
  87. #define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
  88. #define SDW_SHIM_WAKEEN_ENABLE BIT(0)
  89. #define SDW_SHIM_WAKESTS_STATUS BIT(0)
  90. /* Intel ALH Register definitions */
  91. #define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
  92. #define SDW_ALH_NUM_STREAMS 64
  93. #define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
  94. #define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
  95. #define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
  96. enum intel_pdi_type {
  97. INTEL_PDI_IN = 0,
  98. INTEL_PDI_OUT = 1,
  99. INTEL_PDI_BD = 2,
  100. };
  101. #define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
  102. /*
  103. * Read, write helpers for HW registers
  104. */
  105. static inline int intel_readl(void __iomem *base, int offset)
  106. {
  107. return readl(base + offset);
  108. }
  109. static inline void intel_writel(void __iomem *base, int offset, int value)
  110. {
  111. writel(value, base + offset);
  112. }
  113. static inline u16 intel_readw(void __iomem *base, int offset)
  114. {
  115. return readw(base + offset);
  116. }
  117. static inline void intel_writew(void __iomem *base, int offset, u16 value)
  118. {
  119. writew(value, base + offset);
  120. }
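/*
 * intel_wait_bit() polls a register until (reg & mask) == target, sleeping
 * 50-100us between up to ten reads and returning -EAGAIN on timeout.
 * intel_set_bit()/intel_clear_bit() write a value and then wait for the
 * masked bits to become set/cleared.
 */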
  121. static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
  122. {
  123. int timeout = 10;
  124. u32 reg_read;
  125. do {
  126. reg_read = readl(base + offset);
  127. if ((reg_read & mask) == target)
  128. return 0;
  129. timeout--;
  130. usleep_range(50, 100);
  131. } while (timeout != 0);
  132. return -EAGAIN;
  133. }
  134. static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
  135. {
  136. writel(value, base + offset);
  137. return intel_wait_bit(base, offset, mask, 0);
  138. }
  139. static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
  140. {
  141. writel(value, base + offset);
  142. return intel_wait_bit(base, offset, mask, mask);
  143. }
  144. /*
  145. * debugfs
  146. */
  147. #ifdef CONFIG_DEBUG_FS
  148. #define RD_BUF (2 * PAGE_SIZE)
  149. static ssize_t intel_sprintf(void __iomem *mem, bool l,
  150. char *buf, size_t pos, unsigned int reg)
  151. {
  152. int value;
  153. if (l)
  154. value = intel_readl(mem, reg);
  155. else
  156. value = intel_readw(mem, reg);
  157. return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
  158. }
  159. static int intel_reg_show(struct seq_file *s_file, void *data)
  160. {
  161. struct sdw_intel *sdw = s_file->private;
  162. void __iomem *s = sdw->link_res->shim;
  163. void __iomem *a = sdw->link_res->alh;
  164. char *buf;
  165. ssize_t ret;
  166. int i, j;
  167. unsigned int links, reg;
  168. buf = kzalloc(RD_BUF, GFP_KERNEL);
  169. if (!buf)
  170. return -ENOMEM;
  171. links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);
  172. ret = scnprintf(buf, RD_BUF, "Register Value\n");
  173. ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
  174. for (i = 0; i < links; i++) {
  175. reg = SDW_SHIM_LCAP + i * 4;
  176. ret += intel_sprintf(s, true, buf, ret, reg);
  177. }
  178. for (i = 0; i < links; i++) {
  179. ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
  180. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
  181. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
  182. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
  183. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
  184. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
  185. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
  186. ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
  187. /*
  188. * the value 10 is the number of PDIs. We will need a
  189. * cleanup to remove hard-coded Intel configurations
  190. * from cadence_master.c
  191. */
  192. for (j = 0; j < 10; j++) {
  193. ret += intel_sprintf(s, false, buf, ret,
  194. SDW_SHIM_PCMSYCHM(i, j));
  195. ret += intel_sprintf(s, false, buf, ret,
  196. SDW_SHIM_PCMSYCHC(i, j));
  197. }
  198. ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");
  199. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
  200. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
  201. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
  202. }
  203. ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
  204. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
  205. ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
  206. ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
  207. for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
  208. ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
  209. seq_printf(s_file, "%s", buf);
  210. kfree(buf);
  211. return 0;
  212. }
  213. DEFINE_SHOW_ATTRIBUTE(intel_reg);
  214. static int intel_set_m_datamode(void *data, u64 value)
  215. {
  216. struct sdw_intel *sdw = data;
  217. struct sdw_bus *bus = &sdw->cdns.bus;
  218. if (value > SDW_PORT_DATA_MODE_STATIC_1)
  219. return -EINVAL;
  220. /* Userspace changed the hardware state behind the kernel's back */
  221. add_taint(TAINT_USER, LOCKDEP_STILL_OK);
  222. bus->params.m_data_mode = value;
  223. return 0;
  224. }
  225. DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
  226. intel_set_m_datamode, "%llu\n");
  227. static int intel_set_s_datamode(void *data, u64 value)
  228. {
  229. struct sdw_intel *sdw = data;
  230. struct sdw_bus *bus = &sdw->cdns.bus;
  231. if (value > SDW_PORT_DATA_MODE_STATIC_1)
  232. return -EINVAL;
  233. /* Userspace changed the hardware state behind the kernel's back */
  234. add_taint(TAINT_USER, LOCKDEP_STILL_OK);
  235. bus->params.s_data_mode = value;
  236. return 0;
  237. }
  238. DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
  239. intel_set_s_datamode, "%llu\n");
  240. static void intel_debugfs_init(struct sdw_intel *sdw)
  241. {
  242. struct dentry *root = sdw->cdns.bus.debugfs;
  243. if (!root)
  244. return;
  245. sdw->debugfs = debugfs_create_dir("intel-sdw", root);
  246. debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
  247. &intel_reg_fops);
  248. debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
  249. &intel_set_m_datamode_fops);
  250. debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
  251. &intel_set_s_datamode_fops);
  252. sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
  253. }
  254. static void intel_debugfs_exit(struct sdw_intel *sdw)
  255. {
  256. debugfs_remove_recursive(sdw->debugfs);
  257. }
  258. #else
  259. static void intel_debugfs_init(struct sdw_intel *sdw) {}
  260. static void intel_debugfs_exit(struct sdw_intel *sdw) {}
  261. #endif /* CONFIG_DEBUG_FS */
  262. /*
  263. * shim ops
  264. */
  265. static int intel_link_power_up(struct sdw_intel *sdw)
  266. {
  267. unsigned int link_id = sdw->instance;
  268. void __iomem *shim = sdw->link_res->shim;
  269. u32 *shim_mask = sdw->link_res->shim_mask;
  270. struct sdw_bus *bus = &sdw->cdns.bus;
  271. struct sdw_master_prop *prop = &bus->prop;
  272. u32 spa_mask, cpa_mask;
  273. u32 link_control;
  274. int ret = 0;
  275. u32 syncprd;
  276. u32 sync_reg;
  277. mutex_lock(sdw->link_res->shim_lock);
  278. /*
  279. * The hardware relies on an internal counter, typically 4kHz,
  280. * to generate the SoundWire SSP - which defines a 'safe'
  281. * synchronization point between commands and audio transport
  282. * and allows for multi link synchronization. The SYNCPRD value
  283. * is only dependent on the oscillator clock provided to
  284. * the IP, so adjust based on _DSD properties reported in DSDT
  285. * tables. The values reported are based on either 24MHz
  286. * (CNL/CML) or 38.4 MHz (ICL/TGL+).
  287. */
  288. if (prop->mclk_freq % 6000000)
  289. syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
  290. else
  291. syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
  292. if (!*shim_mask) {
  293. dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
  294. /* we first need to program the SyncPRD/CPU registers */
  295. dev_dbg(sdw->cdns.dev,
  296. "%s: first link up, programming SYNCPRD\n", __func__);
  297. /* set SyncPRD period */
  298. sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
  299. u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
  300. /* Set SyncCPU bit */
  301. sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
  302. intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
  303. /* Link power up sequence */
  304. link_control = intel_readl(shim, SDW_SHIM_LCTL);
  305. /* only power-up enabled links */
  306. spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
  307. cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
  308. link_control |= spa_mask;
  309. ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
  310. if (ret < 0) {
  311. dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
  312. goto out;
  313. }
  314. /* SyncCPU will change once link is active */
  315. ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
  316. SDW_SHIM_SYNC_SYNCCPU, 0);
  317. if (ret < 0) {
  318. dev_err(sdw->cdns.dev,
  319. "Failed to set SHIM_SYNC: %d\n", ret);
  320. goto out;
  321. }
  322. }
  323. *shim_mask |= BIT(link_id);
  324. sdw->cdns.link_up = true;
  325. out:
  326. mutex_unlock(sdw->link_res->shim_lock);
  327. return ret;
  328. }
  329. /* this needs to be called with shim_lock */
  330. static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
  331. {
  332. void __iomem *shim = sdw->link_res->shim;
  333. unsigned int link_id = sdw->instance;
  334. u16 ioctl;
  335. /* Switch to MIP from Glue logic */
  336. ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
  337. ioctl &= ~(SDW_SHIM_IOCTL_DOE);
  338. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  339. usleep_range(10, 15);
  340. ioctl &= ~(SDW_SHIM_IOCTL_DO);
  341. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  342. usleep_range(10, 15);
  343. ioctl |= (SDW_SHIM_IOCTL_MIF);
  344. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  345. usleep_range(10, 15);
  346. ioctl &= ~(SDW_SHIM_IOCTL_BKE);
  347. ioctl &= ~(SDW_SHIM_IOCTL_COE);
  348. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  349. usleep_range(10, 15);
  350. /* at this point Master IP has full control of the I/Os */
  351. }
  352. /* this needs to be called with shim_lock */
  353. static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
  354. {
  355. unsigned int link_id = sdw->instance;
  356. void __iomem *shim = sdw->link_res->shim;
  357. u16 ioctl;
  358. /* Glue logic */
  359. ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
  360. ioctl |= SDW_SHIM_IOCTL_BKE;
  361. ioctl |= SDW_SHIM_IOCTL_COE;
  362. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  363. usleep_range(10, 15);
  364. ioctl &= ~(SDW_SHIM_IOCTL_MIF);
  365. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  366. usleep_range(10, 15);
  367. /* at this point Integration Glue has full control of the I/Os */
  368. }
  369. static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
  370. {
  371. void __iomem *shim = sdw->link_res->shim;
  372. unsigned int link_id = sdw->instance;
  373. int ret = 0;
  374. u16 ioctl = 0, act = 0;
  375. mutex_lock(sdw->link_res->shim_lock);
  376. /* Initialize Shim */
  377. ioctl |= SDW_SHIM_IOCTL_BKE;
  378. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  379. usleep_range(10, 15);
  380. ioctl |= SDW_SHIM_IOCTL_WPDD;
  381. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  382. usleep_range(10, 15);
  383. ioctl |= SDW_SHIM_IOCTL_DO;
  384. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  385. usleep_range(10, 15);
  386. ioctl |= SDW_SHIM_IOCTL_DOE;
  387. intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
  388. usleep_range(10, 15);
  389. intel_shim_glue_to_master_ip(sdw);
  390. u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
  391. act |= SDW_SHIM_CTMCTL_DACTQE;
  392. act |= SDW_SHIM_CTMCTL_DODS;
  393. intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
  394. usleep_range(10, 15);
  395. mutex_unlock(sdw->link_res->shim_lock);
  396. return ret;
  397. }
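/*
 * intel_shim_wake() arms or disarms the per-link wake: it sets or clears
 * the link's bit in SHIM WAKEEN and, when disabling, also writes the
 * link's bit in WAKESTS to clear any pending wake status.
 */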
  398. static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
  399. {
  400. void __iomem *shim = sdw->link_res->shim;
  401. unsigned int link_id = sdw->instance;
  402. u16 wake_en, wake_sts;
  403. mutex_lock(sdw->link_res->shim_lock);
  404. wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
  405. if (wake_enable) {
  406. /* Enable the wakeup */
  407. wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
  408. intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
  409. } else {
  410. /* Disable the wake up interrupt */
  411. wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
  412. intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
  413. /* Clear wake status */
  414. wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
  415. wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
  416. intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
  417. }
  418. mutex_unlock(sdw->link_res->shim_lock);
  419. }
  420. static int intel_link_power_down(struct sdw_intel *sdw)
  421. {
  422. u32 link_control, spa_mask, cpa_mask;
  423. unsigned int link_id = sdw->instance;
  424. void __iomem *shim = sdw->link_res->shim;
  425. u32 *shim_mask = sdw->link_res->shim_mask;
  426. int ret = 0;
  427. mutex_lock(sdw->link_res->shim_lock);
  428. if (!(*shim_mask & BIT(link_id)))
  429. dev_err(sdw->cdns.dev,
  430. "%s: Unbalanced power-up/down calls\n", __func__);
  431. sdw->cdns.link_up = false;
  432. intel_shim_master_ip_to_glue(sdw);
  433. *shim_mask &= ~BIT(link_id);
  434. if (!*shim_mask) {
  435. dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
  436. /* Link power down sequence */
  437. link_control = intel_readl(shim, SDW_SHIM_LCTL);
  438. /* only power-down enabled links */
  439. spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
  440. cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
  441. link_control &= spa_mask;
  442. ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
  443. if (ret < 0) {
  444. dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
  445. /*
  446. * we leave the sdw->cdns.link_up flag as false since we've disabled
  447. * the link at this point and cannot handle interrupts any longer.
  448. */
  449. }
  450. }
  451. link_control = intel_readl(shim, SDW_SHIM_LCTL);
  452. mutex_unlock(sdw->link_res->shim_lock);
  453. return ret;
  454. }
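/*
 * Bank-switch synchronization: intel_shim_sync_arm() sets this link's
 * CMDSYNC bit in the SHIM SYNC register; a later SYNCGO write (see
 * intel_shim_sync_go_unlocked()) triggers the bank switch for all the
 * Masters and clears the CMDSYNC bits.
 */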
  455. static void intel_shim_sync_arm(struct sdw_intel *sdw)
  456. {
  457. void __iomem *shim = sdw->link_res->shim;
  458. u32 sync_reg;
  459. mutex_lock(sdw->link_res->shim_lock);
  460. /* update SYNC register */
  461. sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
  462. sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
  463. intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
  464. mutex_unlock(sdw->link_res->shim_lock);
  465. }
  466. static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
  467. {
  468. void __iomem *shim = sdw->link_res->shim;
  469. u32 sync_reg;
  470. int ret;
  471. /* Read SYNC register */
  472. sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
  473. /*
  474. * Set SyncGO bit to synchronously trigger a bank switch for
  475. * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
  476. * the Masters.
  477. */
  478. sync_reg |= SDW_SHIM_SYNC_SYNCGO;
  479. ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
  480. SDW_SHIM_SYNC_SYNCGO);
  481. if (ret < 0)
  482. dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
  483. return ret;
  484. }
  485. static int intel_shim_sync_go(struct sdw_intel *sdw)
  486. {
  487. int ret;
  488. mutex_lock(sdw->link_res->shim_lock);
  489. ret = intel_shim_sync_go_unlocked(sdw);
  490. mutex_unlock(sdw->link_res->shim_lock);
  491. return ret;
  492. }
  493. /*
  494. * PDI routines
  495. */
  496. static void intel_pdi_init(struct sdw_intel *sdw,
  497. struct sdw_cdns_stream_config *config)
  498. {
  499. void __iomem *shim = sdw->link_res->shim;
  500. unsigned int link_id = sdw->instance;
  501. int pcm_cap, pdm_cap;
  502. /* PCM Stream Capability */
  503. pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
  504. config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
  505. config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
  506. config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
  507. dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
  508. config->pcm_bd, config->pcm_in, config->pcm_out);
  509. /* PDM Stream Capability */
  510. pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
  511. config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
  512. config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
  513. config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);
  514. dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
  515. config->pdm_bd, config->pdm_in, config->pdm_out);
  516. }
  517. static int
  518. intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
  519. {
  520. void __iomem *shim = sdw->link_res->shim;
  521. unsigned int link_id = sdw->instance;
  522. int count;
  523. if (pcm) {
  524. count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
  525. /*
  526. * WORKAROUND: on all existing Intel controllers, pdi
  527. * number 2 reports channel count as 1 even though it
528. * supports 8 channels. Hardcode the count for pdi
529. * number 2.
  530. */
  531. if (pdi_num == 2)
  532. count = 7;
  533. } else {
  534. count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
  535. count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
  536. }
  537. /* zero based values for channel count in register */
  538. count++;
  539. return count;
  540. }
  541. static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
  542. struct sdw_cdns_pdi *pdi,
  543. unsigned int num_pdi,
  544. unsigned int *num_ch, bool pcm)
  545. {
  546. int i, ch_count = 0;
  547. for (i = 0; i < num_pdi; i++) {
  548. pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm);
  549. ch_count += pdi->ch_count;
  550. pdi++;
  551. }
  552. *num_ch = ch_count;
  553. return 0;
  554. }
  555. static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
  556. struct sdw_cdns_streams *stream, bool pcm)
  557. {
  558. intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
  559. &stream->num_ch_bd, pcm);
  560. intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
  561. &stream->num_ch_in, pcm);
  562. intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
  563. &stream->num_ch_out, pcm);
  564. return 0;
  565. }
  566. static int intel_pdi_ch_update(struct sdw_intel *sdw)
  567. {
  568. /* First update PCM streams followed by PDM streams */
  569. intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true);
  570. intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false);
  571. return 0;
  572. }
  573. static void
  574. intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
  575. {
  576. void __iomem *shim = sdw->link_res->shim;
  577. unsigned int link_id = sdw->instance;
  578. int pdi_conf = 0;
  579. /* the Bulk and PCM streams are not contiguous */
  580. pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
  581. if (pdi->num >= 2)
  582. pdi->intel_alh_id += 2;
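/* e.g. link 1, PDI 4: 1 * 16 + 4 + 3 = 23, plus 2 since PDI >= 2, i.e. ALH stream 25 */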
  583. /*
  584. * Program stream parameters to stream SHIM register
  585. * This is applicable for PCM stream only.
  586. */
  587. if (pdi->type != SDW_STREAM_PCM)
  588. return;
  589. if (pdi->dir == SDW_DATA_DIR_RX)
  590. pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
  591. else
  592. pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
  593. u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
  594. u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
  595. u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
  596. intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
  597. }
  598. static void
  599. intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
  600. {
  601. void __iomem *alh = sdw->link_res->alh;
  602. unsigned int link_id = sdw->instance;
  603. unsigned int conf;
  604. /* the Bulk and PCM streams are not contiguous */
  605. pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
  606. if (pdi->num >= 2)
  607. pdi->intel_alh_id += 2;
  608. /* Program Stream config ALH register */
  609. conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
  610. u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
  611. u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
  612. intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
  613. }
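/*
 * intel_params_stream()/intel_free_stream() hand the DAI, hw_params and
 * ALH stream tag to the DSP driver through the callbacks registered in
 * link_res->ops; intel_params_stream() returns -EIO when no callback is
 * available.
 */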
  614. static int intel_params_stream(struct sdw_intel *sdw,
  615. struct snd_pcm_substream *substream,
  616. struct snd_soc_dai *dai,
  617. struct snd_pcm_hw_params *hw_params,
  618. int link_id, int alh_stream_id)
  619. {
  620. struct sdw_intel_link_res *res = sdw->link_res;
  621. struct sdw_intel_stream_params_data params_data;
  622. params_data.substream = substream;
  623. params_data.dai = dai;
  624. params_data.hw_params = hw_params;
  625. params_data.link_id = link_id;
  626. params_data.alh_stream_id = alh_stream_id;
  627. if (res->ops && res->ops->params_stream && res->dev)
  628. return res->ops->params_stream(res->dev,
  629. &params_data);
  630. return -EIO;
  631. }
  632. static int intel_free_stream(struct sdw_intel *sdw,
  633. struct snd_pcm_substream *substream,
  634. struct snd_soc_dai *dai,
  635. int link_id)
  636. {
  637. struct sdw_intel_link_res *res = sdw->link_res;
  638. struct sdw_intel_stream_free_data free_data;
  639. free_data.substream = substream;
  640. free_data.dai = dai;
  641. free_data.link_id = link_id;
  642. if (res->ops && res->ops->free_stream && res->dev)
  643. return res->ops->free_stream(res->dev,
  644. &free_data);
  645. return 0;
  646. }
  647. /*
  648. * bank switch routines
  649. */
  650. static int intel_pre_bank_switch(struct sdw_bus *bus)
  651. {
  652. struct sdw_cdns *cdns = bus_to_cdns(bus);
  653. struct sdw_intel *sdw = cdns_to_intel(cdns);
  654. /* Write to register only for multi-link */
  655. if (!bus->multi_link)
  656. return 0;
  657. intel_shim_sync_arm(sdw);
  658. return 0;
  659. }
  660. static int intel_post_bank_switch(struct sdw_bus *bus)
  661. {
  662. struct sdw_cdns *cdns = bus_to_cdns(bus);
  663. struct sdw_intel *sdw = cdns_to_intel(cdns);
  664. void __iomem *shim = sdw->link_res->shim;
  665. int sync_reg, ret;
  666. /* Write to register only for multi-link */
  667. if (!bus->multi_link)
  668. return 0;
  669. mutex_lock(sdw->link_res->shim_lock);
  670. /* Read SYNC register */
  671. sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
  672. /*
673. * post_bank_switch() ops is called from the bus in a loop for
674. * all the Masters in the stream, with the expectation that
675. * we trigger the bank switch only for the first Master in the list
676. * and do nothing for the other Masters.
  677. *
  678. * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
  679. */
  680. if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
  681. ret = 0;
  682. goto unlock;
  683. }
  684. ret = intel_shim_sync_go_unlocked(sdw);
  685. unlock:
  686. mutex_unlock(sdw->link_res->shim_lock);
  687. if (ret < 0)
  688. dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
  689. return ret;
  690. }
  691. /*
  692. * DAI routines
  693. */
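/*
 * intel_startup() resumes the master with pm_runtime_get_sync() before any
 * stream activity; -EACCES is tolerated so the DAI still works when runtime
 * PM is disabled.
 */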
  694. static int intel_startup(struct snd_pcm_substream *substream,
  695. struct snd_soc_dai *dai)
  696. {
  697. struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
  698. int ret;
  699. ret = pm_runtime_get_sync(cdns->dev);
  700. if (ret < 0 && ret != -EACCES) {
  701. dev_err_ratelimited(cdns->dev,
  702. "pm_runtime_get_sync failed in %s, ret %d\n",
  703. __func__, ret);
  704. pm_runtime_put_noidle(cdns->dev);
  705. return ret;
  706. }
  707. return 0;
  708. }
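/*
 * intel_hw_params() allocates a Cadence PDI for the requested direction and
 * channel count, programs the SHIM and ALH registers for it, informs the
 * DSP of the ALH stream number, then adds this Master's port to the
 * sdw_stream; PDM streams use a 50x frame rate with 1 bit per sample.
 */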
  709. static int intel_hw_params(struct snd_pcm_substream *substream,
  710. struct snd_pcm_hw_params *params,
  711. struct snd_soc_dai *dai)
  712. {
  713. struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
  714. struct sdw_intel *sdw = cdns_to_intel(cdns);
  715. struct sdw_cdns_dma_data *dma;
  716. struct sdw_cdns_pdi *pdi;
  717. struct sdw_stream_config sconfig;
  718. struct sdw_port_config *pconfig;
  719. int ch, dir;
  720. int ret;
  721. bool pcm = true;
  722. dma = snd_soc_dai_get_dma_data(dai, substream);
  723. if (!dma)
  724. return -EIO;
  725. ch = params_channels(params);
  726. if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
  727. dir = SDW_DATA_DIR_RX;
  728. else
  729. dir = SDW_DATA_DIR_TX;
  730. if (dma->stream_type == SDW_STREAM_PDM)
  731. pcm = false;
  732. if (pcm)
  733. pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
  734. else
  735. pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
  736. if (!pdi) {
  737. ret = -EINVAL;
  738. goto error;
  739. }
  740. /* do run-time configurations for SHIM, ALH and PDI/PORT */
  741. intel_pdi_shim_configure(sdw, pdi);
  742. intel_pdi_alh_configure(sdw, pdi);
  743. sdw_cdns_config_stream(cdns, ch, dir, pdi);
  744. /* store pdi and hw_params, may be needed in prepare step */
  745. dma->suspended = false;
  746. dma->pdi = pdi;
  747. dma->hw_params = params;
  748. /* Inform DSP about PDI stream number */
  749. ret = intel_params_stream(sdw, substream, dai, params,
  750. sdw->instance,
  751. pdi->intel_alh_id);
  752. if (ret)
  753. goto error;
  754. sconfig.direction = dir;
  755. sconfig.ch_count = ch;
  756. sconfig.frame_rate = params_rate(params);
  757. sconfig.type = dma->stream_type;
  758. if (dma->stream_type == SDW_STREAM_PDM) {
  759. sconfig.frame_rate *= 50;
  760. sconfig.bps = 1;
  761. } else {
  762. sconfig.bps = snd_pcm_format_width(params_format(params));
  763. }
  764. /* Port configuration */
  765. pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
  766. if (!pconfig) {
  767. ret = -ENOMEM;
  768. goto error;
  769. }
  770. pconfig->num = pdi->num;
  771. pconfig->ch_mask = (1 << ch) - 1;
  772. ret = sdw_stream_add_master(&cdns->bus, &sconfig,
  773. pconfig, 1, dma->stream);
  774. if (ret)
  775. dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
  776. kfree(pconfig);
  777. error:
  778. return ret;
  779. }
  780. static int intel_prepare(struct snd_pcm_substream *substream,
  781. struct snd_soc_dai *dai)
  782. {
  783. struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
  784. struct sdw_intel *sdw = cdns_to_intel(cdns);
  785. struct sdw_cdns_dma_data *dma;
  786. int ch, dir;
  787. int ret = 0;
  788. dma = snd_soc_dai_get_dma_data(dai, substream);
  789. if (!dma) {
  790. dev_err(dai->dev, "failed to get dma data in %s",
  791. __func__);
  792. return -EIO;
  793. }
  794. if (dma->suspended) {
  795. dma->suspended = false;
  796. /*
  797. * .prepare() is called after system resume, where we
  798. * need to reinitialize the SHIM/ALH/Cadence IP.
  799. * .prepare() is also called to deal with underflows,
  800. * but in those cases we cannot touch ALH/SHIM
  801. * registers
  802. */
  803. /* configure stream */
  804. ch = params_channels(dma->hw_params);
  805. if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
  806. dir = SDW_DATA_DIR_RX;
  807. else
  808. dir = SDW_DATA_DIR_TX;
  809. intel_pdi_shim_configure(sdw, dma->pdi);
  810. intel_pdi_alh_configure(sdw, dma->pdi);
  811. sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
  812. /* Inform DSP about PDI stream number */
  813. ret = intel_params_stream(sdw, substream, dai,
  814. dma->hw_params,
  815. sdw->instance,
  816. dma->pdi->intel_alh_id);
  817. }
  818. return ret;
  819. }
  820. static int
  821. intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
  822. {
  823. struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
  824. struct sdw_intel *sdw = cdns_to_intel(cdns);
  825. struct sdw_cdns_dma_data *dma;
  826. int ret;
  827. dma = snd_soc_dai_get_dma_data(dai, substream);
  828. if (!dma)
  829. return -EIO;
  830. /*
  831. * The sdw stream state will transition to RELEASED when stream->
  832. * master_list is empty. So the stream state will transition to
  833. * DEPREPARED for the first cpu-dai and to RELEASED for the last
  834. * cpu-dai.
  835. */
  836. ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
  837. if (ret < 0) {
  838. dev_err(dai->dev, "remove master from stream %s failed: %d\n",
  839. dma->stream->name, ret);
  840. return ret;
  841. }
  842. ret = intel_free_stream(sdw, substream, dai, sdw->instance);
  843. if (ret < 0) {
  844. dev_err(dai->dev, "intel_free_stream: failed %d", ret);
  845. return ret;
  846. }
  847. dma->hw_params = NULL;
  848. dma->pdi = NULL;
  849. return 0;
  850. }
  851. static void intel_shutdown(struct snd_pcm_substream *substream,
  852. struct snd_soc_dai *dai)
  853. {
  854. struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
  855. pm_runtime_mark_last_busy(cdns->dev);
  856. pm_runtime_put_autosuspend(cdns->dev);
  857. }
  858. static int intel_component_dais_suspend(struct snd_soc_component *component)
  859. {
  860. struct sdw_cdns_dma_data *dma;
  861. struct snd_soc_dai *dai;
  862. for_each_component_dais(component, dai) {
  863. /*
  864. * we don't have a .suspend dai_ops, and we don't have access
  865. * to the substream, so let's mark both capture and playback
  866. * DMA contexts as suspended
  867. */
  868. dma = dai->playback_dma_data;
  869. if (dma)
  870. dma->suspended = true;
  871. dma = dai->capture_dma_data;
  872. if (dma)
  873. dma->suspended = true;
  874. }
  875. return 0;
  876. }
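/*
 * The set_sdw_stream() callbacks below hand the sdw_stream pointer to the
 * Cadence layer (PCM vs PDM selected by the boolean argument), while
 * intel_get_sdw_stream() returns the stream stored in the DAI dma data.
 */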
  877. static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
  878. void *stream, int direction)
  879. {
  880. return cdns_set_sdw_stream(dai, stream, true, direction);
  881. }
  882. static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
  883. void *stream, int direction)
  884. {
  885. return cdns_set_sdw_stream(dai, stream, false, direction);
  886. }
  887. static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
  888. int direction)
  889. {
  890. struct sdw_cdns_dma_data *dma;
  891. if (direction == SNDRV_PCM_STREAM_PLAYBACK)
  892. dma = dai->playback_dma_data;
  893. else
  894. dma = dai->capture_dma_data;
  895. if (!dma)
  896. return ERR_PTR(-EINVAL);
  897. return dma->stream;
  898. }
  899. static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
  900. .startup = intel_startup,
  901. .hw_params = intel_hw_params,
  902. .prepare = intel_prepare,
  903. .hw_free = intel_hw_free,
  904. .shutdown = intel_shutdown,
  905. .set_sdw_stream = intel_pcm_set_sdw_stream,
  906. .get_sdw_stream = intel_get_sdw_stream,
  907. };
  908. static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
  909. .startup = intel_startup,
  910. .hw_params = intel_hw_params,
  911. .prepare = intel_prepare,
  912. .hw_free = intel_hw_free,
  913. .shutdown = intel_shutdown,
  914. .set_sdw_stream = intel_pdm_set_sdw_stream,
  915. .get_sdw_stream = intel_get_sdw_stream,
  916. };
  917. static const struct snd_soc_component_driver dai_component = {
  918. .name = "soundwire",
  919. .suspend = intel_component_dais_suspend
  920. };
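/*
 * intel_create_dai() fills 'num' DAI drivers starting at offset 'off',
 * named "SDW%d Pin%d"; the PDI type selects playback and/or capture
 * support and the 'pcm' flag selects the PCM or PDM DAI ops.
 */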
  921. static int intel_create_dai(struct sdw_cdns *cdns,
  922. struct snd_soc_dai_driver *dais,
  923. enum intel_pdi_type type,
  924. u32 num, u32 off, u32 max_ch, bool pcm)
  925. {
  926. int i;
  927. if (num == 0)
  928. return 0;
  929. /* TODO: Read supported rates/formats from hardware */
  930. for (i = off; i < (off + num); i++) {
  931. dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
  932. "SDW%d Pin%d",
  933. cdns->instance, i);
  934. if (!dais[i].name)
  935. return -ENOMEM;
  936. if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
  937. dais[i].playback.channels_min = 1;
  938. dais[i].playback.channels_max = max_ch;
  939. dais[i].playback.rates = SNDRV_PCM_RATE_48000;
  940. dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
  941. }
  942. if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
  943. dais[i].capture.channels_min = 1;
  944. dais[i].capture.channels_max = max_ch;
  945. dais[i].capture.rates = SNDRV_PCM_RATE_48000;
  946. dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
  947. }
  948. if (pcm)
  949. dais[i].ops = &intel_pcm_dai_ops;
  950. else
  951. dais[i].ops = &intel_pdm_dai_ops;
  952. }
  953. return 0;
  954. }
  955. static int intel_register_dai(struct sdw_intel *sdw)
  956. {
  957. struct sdw_cdns *cdns = &sdw->cdns;
  958. struct sdw_cdns_streams *stream;
  959. struct snd_soc_dai_driver *dais;
  960. int num_dai, ret, off = 0;
  961. /* DAIs are created based on total number of PDIs supported */
  962. num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;
  963. dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
  964. if (!dais)
  965. return -ENOMEM;
  966. /* Create PCM DAIs */
  967. stream = &cdns->pcm;
  968. ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
  969. off, stream->num_ch_in, true);
  970. if (ret)
  971. return ret;
  972. off += cdns->pcm.num_in;
  973. ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
  974. off, stream->num_ch_out, true);
  975. if (ret)
  976. return ret;
  977. off += cdns->pcm.num_out;
  978. ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
  979. off, stream->num_ch_bd, true);
  980. if (ret)
  981. return ret;
  982. /* Create PDM DAIs */
  983. stream = &cdns->pdm;
  984. off += cdns->pcm.num_bd;
  985. ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
  986. off, stream->num_ch_in, false);
  987. if (ret)
  988. return ret;
  989. off += cdns->pdm.num_in;
  990. ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
  991. off, stream->num_ch_out, false);
  992. if (ret)
  993. return ret;
  994. off += cdns->pdm.num_out;
  995. ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
  996. off, stream->num_ch_bd, false);
  997. if (ret)
  998. return ret;
  999. return snd_soc_register_component(cdns->dev, &dai_component,
  1000. dais, num_dai);
  1001. }
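/*
 * sdw_master_read_intel_prop() parses the per-link _DSD subproperties:
 * the IP clock (halved, since BIOS reports the 2x value) and a quirk mask
 * that can mark the link as hw_disabled.
 */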
  1002. static int sdw_master_read_intel_prop(struct sdw_bus *bus)
  1003. {
  1004. struct sdw_master_prop *prop = &bus->prop;
  1005. struct fwnode_handle *link;
  1006. char name[32];
  1007. u32 quirk_mask;
  1008. /* Find master handle */
  1009. snprintf(name, sizeof(name),
  1010. "mipi-sdw-link-%d-subproperties", bus->link_id);
  1011. link = device_get_named_child_node(bus->dev, name);
  1012. if (!link) {
  1013. dev_err(bus->dev, "Master node %s not found\n", name);
  1014. return -EIO;
  1015. }
  1016. fwnode_property_read_u32(link,
  1017. "intel-sdw-ip-clock",
  1018. &prop->mclk_freq);
  1019. /* the values reported by BIOS are the 2x clock, not the bus clock */
  1020. prop->mclk_freq /= 2;
  1021. fwnode_property_read_u32(link,
  1022. "intel-quirk-mask",
  1023. &quirk_mask);
  1024. if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
  1025. prop->hw_disabled = true;
  1026. return 0;
  1027. }
  1028. static int intel_prop_read(struct sdw_bus *bus)
  1029. {
  1030. /* Initialize with default handler to read all DisCo properties */
  1031. sdw_master_read_prop(bus);
  1032. /* read Intel-specific properties */
  1033. sdw_master_read_intel_prop(bus);
  1034. return 0;
  1035. }
  1036. static struct sdw_master_ops sdw_intel_ops = {
  1037. .read_prop = sdw_master_read_prop,
  1038. .xfer_msg = cdns_xfer_msg,
  1039. .xfer_msg_defer = cdns_xfer_msg_defer,
  1040. .reset_page_addr = cdns_reset_page_addr,
  1041. .set_bus_conf = cdns_bus_conf,
  1042. .pre_bank_switch = intel_pre_bank_switch,
  1043. .post_bank_switch = intel_post_bank_switch,
  1044. };
  1045. static int intel_init(struct sdw_intel *sdw)
  1046. {
  1047. bool clock_stop;
  1048. /* Initialize shim and controller */
  1049. intel_link_power_up(sdw);
  1050. clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
  1051. intel_shim_init(sdw, clock_stop);
  1052. return 0;
  1053. }
  1054. /*
  1055. * probe and init
  1056. */
  1057. static int intel_master_probe(struct platform_device *pdev)
  1058. {
  1059. struct device *dev = &pdev->dev;
  1060. struct sdw_intel *sdw;
  1061. struct sdw_cdns *cdns;
  1062. struct sdw_bus *bus;
  1063. int ret;
  1064. sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
  1065. if (!sdw)
  1066. return -ENOMEM;
  1067. cdns = &sdw->cdns;
  1068. bus = &cdns->bus;
  1069. sdw->instance = pdev->id;
  1070. sdw->link_res = dev_get_platdata(dev);
  1071. cdns->dev = dev;
  1072. cdns->registers = sdw->link_res->registers;
  1073. cdns->instance = sdw->instance;
  1074. cdns->msg_count = 0;
  1075. bus->link_id = pdev->id;
  1076. sdw_cdns_probe(cdns);
  1077. /* Set property read ops */
  1078. sdw_intel_ops.read_prop = intel_prop_read;
  1079. bus->ops = &sdw_intel_ops;
  1080. /* set driver data, accessed by snd_soc_dai_get_drvdata() */
  1081. dev_set_drvdata(dev, cdns);
  1082. /* use generic bandwidth allocation algorithm */
  1083. sdw->cdns.bus.compute_params = sdw_compute_params;
  1084. ret = sdw_bus_master_add(bus, dev, dev->fwnode);
  1085. if (ret) {
  1086. dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
  1087. return ret;
  1088. }
  1089. if (bus->prop.hw_disabled)
  1090. dev_info(dev,
  1091. "SoundWire master %d is disabled, will be ignored\n",
  1092. bus->link_id);
  1093. /*
1094. * Ignore BIOS err_threshold; it's a really bad idea when dealing
1095. * with multiple hardware-synchronized links
  1096. */
  1097. bus->prop.err_threshold = 0;
  1098. return 0;
  1099. }
  1100. int intel_master_startup(struct platform_device *pdev)
  1101. {
  1102. struct sdw_cdns_stream_config config;
  1103. struct device *dev = &pdev->dev;
  1104. struct sdw_cdns *cdns = dev_get_drvdata(dev);
  1105. struct sdw_intel *sdw = cdns_to_intel(cdns);
  1106. struct sdw_bus *bus = &cdns->bus;
  1107. int link_flags;
  1108. bool multi_link;
  1109. u32 clock_stop_quirks;
  1110. int ret;
  1111. if (bus->prop.hw_disabled) {
  1112. dev_info(dev,
  1113. "SoundWire master %d is disabled, ignoring\n",
  1114. sdw->instance);
  1115. return 0;
  1116. }
  1117. link_flags = md_flags >> (bus->link_id * 8);
  1118. multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
  1119. if (!multi_link) {
  1120. dev_dbg(dev, "Multi-link is disabled\n");
  1121. bus->multi_link = false;
  1122. } else {
  1123. /*
  1124. * hardware-based synchronization is required regardless
  1125. * of the number of segments used by a stream: SSP-based
  1126. * synchronization is gated by gsync when the multi-master
  1127. * mode is set.
  1128. */
  1129. bus->multi_link = true;
  1130. bus->hw_sync_min_links = 1;
  1131. }
  1132. /* Initialize shim, controller */
  1133. ret = intel_init(sdw);
  1134. if (ret)
  1135. goto err_init;
  1136. /* Read the PDI config and initialize cadence PDI */
  1137. intel_pdi_init(sdw, &config);
  1138. ret = sdw_cdns_pdi_init(cdns, config);
  1139. if (ret)
  1140. goto err_init;
  1141. intel_pdi_ch_update(sdw);
  1142. ret = sdw_cdns_enable_interrupt(cdns, true);
  1143. if (ret < 0) {
  1144. dev_err(dev, "cannot enable interrupts\n");
  1145. goto err_init;
  1146. }
  1147. /*
  1148. * follow recommended programming flows to avoid timeouts when
  1149. * gsync is enabled
  1150. */
  1151. if (multi_link)
  1152. intel_shim_sync_arm(sdw);
  1153. ret = sdw_cdns_init(cdns);
  1154. if (ret < 0) {
  1155. dev_err(dev, "unable to initialize Cadence IP\n");
  1156. goto err_interrupt;
  1157. }
  1158. ret = sdw_cdns_exit_reset(cdns);
  1159. if (ret < 0) {
  1160. dev_err(dev, "unable to exit bus reset sequence\n");
  1161. goto err_interrupt;
  1162. }
  1163. if (multi_link) {
  1164. ret = intel_shim_sync_go(sdw);
  1165. if (ret < 0) {
  1166. dev_err(dev, "sync go failed: %d\n", ret);
  1167. goto err_interrupt;
  1168. }
  1169. }
  1170. /* Register DAIs */
  1171. ret = intel_register_dai(sdw);
  1172. if (ret) {
  1173. dev_err(dev, "DAI registration failed: %d\n", ret);
  1174. snd_soc_unregister_component(dev);
  1175. goto err_interrupt;
  1176. }
  1177. intel_debugfs_init(sdw);
  1178. /* Enable runtime PM */
  1179. if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
  1180. pm_runtime_set_autosuspend_delay(dev,
  1181. INTEL_MASTER_SUSPEND_DELAY_MS);
  1182. pm_runtime_use_autosuspend(dev);
  1183. pm_runtime_mark_last_busy(dev);
  1184. pm_runtime_set_active(dev);
  1185. pm_runtime_enable(dev);
  1186. }
  1187. clock_stop_quirks = sdw->link_res->clock_stop_quirks;
  1188. if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
  1189. /*
  1190. * To keep the clock running we need to prevent
  1191. * pm_runtime suspend from happening by increasing the
  1192. * reference count.
  1193. * This quirk is specified by the parent PCI device in
  1194. * case of specific latency requirements. It will have
  1195. * no effect if pm_runtime is disabled by the user via
  1196. * a module parameter for testing purposes.
  1197. */
  1198. pm_runtime_get_noresume(dev);
  1199. }
  1200. /*
  1201. * The runtime PM status of Slave devices is "Unsupported"
  1202. * until they report as ATTACHED. If they don't, e.g. because
  1203. * there are no Slave devices populated or if the power-on is
  1204. * delayed or dependent on a power switch, the Master will
  1205. * remain active and prevent its parent from suspending.
  1206. *
  1207. * Conditionally force the pm_runtime core to re-evaluate the
  1208. * Master status in the absence of any Slave activity. A quirk
  1209. * is provided to e.g. deal with Slaves that may be powered on
  1210. * with a delay. A more complete solution would require the
  1211. * definition of Master properties.
  1212. */
  1213. if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
  1214. pm_runtime_idle(dev);
  1215. return 0;
  1216. err_interrupt:
  1217. sdw_cdns_enable_interrupt(cdns, false);
  1218. err_init:
  1219. return ret;
  1220. }
  1221. static int intel_master_remove(struct platform_device *pdev)
  1222. {
  1223. struct device *dev = &pdev->dev;
  1224. struct sdw_cdns *cdns = dev_get_drvdata(dev);
  1225. struct sdw_intel *sdw = cdns_to_intel(cdns);
  1226. struct sdw_bus *bus = &cdns->bus;
  1227. /*
  1228. * Since pm_runtime is already disabled, we don't decrease
  1229. * the refcount when the clock_stop_quirk is
  1230. * SDW_INTEL_CLK_STOP_NOT_ALLOWED
  1231. */
  1232. if (!bus->prop.hw_disabled) {
  1233. intel_debugfs_exit(sdw);
  1234. sdw_cdns_enable_interrupt(cdns, false);
  1235. snd_soc_unregister_component(dev);
  1236. }
  1237. sdw_bus_master_delete(bus);
  1238. return 0;
  1239. }
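/*
 * Called by the parent driver when a wake event is detected: if this
 * link's WAKESTS bit is set, disable WAKEEN to stop the interrupt flood
 * and request a runtime resume of the Master.
 */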
  1240. int intel_master_process_wakeen_event(struct platform_device *pdev)
  1241. {
  1242. struct device *dev = &pdev->dev;
  1243. struct sdw_intel *sdw;
  1244. struct sdw_bus *bus;
  1245. void __iomem *shim;
  1246. u16 wake_sts;
  1247. sdw = platform_get_drvdata(pdev);
  1248. bus = &sdw->cdns.bus;
  1249. if (bus->prop.hw_disabled) {
  1250. dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n", bus->link_id);
  1251. return 0;
  1252. }
  1253. shim = sdw->link_res->shim;
  1254. wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
  1255. if (!(wake_sts & BIT(sdw->instance)))
  1256. return 0;
  1257. /* disable WAKEEN interrupt ASAP to prevent interrupt flood */
  1258. intel_shim_wake(sdw, false);
  1259. /*
1260. * resume the Master, which will generate a bus reset and result in
1261. * Slaves re-attaching and being re-enumerated. The SoundWire physical
  1262. * device which generated the wake will trigger an interrupt, which
  1263. * will in turn cause the corresponding Linux Slave device to be
  1264. * resumed and the Slave codec driver to check the status.
  1265. */
  1266. pm_request_resume(dev);
  1267. return 0;
  1268. }
  1269. /*
  1270. * PM calls
  1271. */
  1272. #ifdef CONFIG_PM
  1273. static int __maybe_unused intel_suspend(struct device *dev)
  1274. {
  1275. struct sdw_cdns *cdns = dev_get_drvdata(dev);
  1276. struct sdw_intel *sdw = cdns_to_intel(cdns);
  1277. struct sdw_bus *bus = &cdns->bus;
  1278. u32 clock_stop_quirks;
  1279. int ret;
  1280. if (bus->prop.hw_disabled) {
  1281. dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
  1282. bus->link_id);
  1283. return 0;
  1284. }
  1285. if (pm_runtime_suspended(dev)) {
  1286. dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
  1287. clock_stop_quirks = sdw->link_res->clock_stop_quirks;
  1288. if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
  1289. !clock_stop_quirks) &&
  1290. !pm_runtime_suspended(dev->parent)) {
  1291. /*
  1292. * if we've enabled clock stop, and the parent
  1293. * is still active, disable shim wake. The
  1294. * SHIM registers are not accessible if the
  1295. * parent is already pm_runtime suspended so
  1296. * it's too late to change that configuration
  1297. */
  1298. intel_shim_wake(sdw, false);
  1299. }
  1300. return 0;
  1301. }
  1302. ret = sdw_cdns_enable_interrupt(cdns, false);
  1303. if (ret < 0) {
  1304. dev_err(dev, "cannot disable interrupts on suspend\n");
  1305. return ret;
  1306. }
  1307. ret = intel_link_power_down(sdw);
  1308. if (ret) {
  1309. dev_err(dev, "Link power down failed: %d", ret);
  1310. return ret;
  1311. }
  1312. intel_shim_wake(sdw, false);
  1313. return 0;
  1314. }
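/*
 * intel_suspend_runtime() picks the idle flavor from clock_stop_quirks:
 * CLK_STOP_TEARDOWN powers the link down with wake disabled, while
 * CLK_STOP_BUS_RESET (or no quirk) stops the bus clock, powers the link
 * down and keeps the SHIM wake enabled so a Slave can wake the system.
 */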
static int intel_suspend_runtime(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	u32 clock_stop_quirks;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	clock_stop_quirks = sdw->link_res->clock_stop_quirks;

	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
		ret = sdw_cdns_enable_interrupt(cdns, false);
		if (ret < 0) {
			dev_err(dev, "cannot disable interrupts on suspend\n");
			return ret;
		}

		ret = intel_link_power_down(sdw);
		if (ret) {
			dev_err(dev, "Link power down failed: %d", ret);
			return ret;
		}

		intel_shim_wake(sdw, false);

	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
		   !clock_stop_quirks) {
		ret = sdw_cdns_clock_stop(cdns, true);
		if (ret < 0) {
			dev_err(dev, "cannot enable clock stop on suspend\n");
			return ret;
		}

		ret = sdw_cdns_enable_interrupt(cdns, false);
		if (ret < 0) {
			dev_err(dev, "cannot disable interrupts on suspend\n");
			return ret;
		}

		ret = intel_link_power_down(sdw);
		if (ret) {
			dev_err(dev, "Link power down failed: %d", ret);
			return ret;
		}

		intel_shim_wake(sdw, true);
	} else {
		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
			__func__, clock_stop_quirks);
		ret = -EINVAL;
	}

	return ret;
}
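
/*
 * System resume handler: re-initialize the SHIM and Cadence IP, mark all
 * Slaves as UNATTACHED and run the bus reset/sync-go sequence, then delay
 * the next runtime suspend so enumeration can complete.
 */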
static int __maybe_unused intel_resume(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	int link_flags;
	bool multi_link;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	link_flags = md_flags >> (bus->link_id * 8);
	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);

	if (pm_runtime_suspended(dev)) {
		dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);

		/* follow required sequence from runtime_pm.rst */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_enable(dev);

		link_flags = md_flags >> (bus->link_id * 8);

		if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
			pm_runtime_idle(dev);
	}

	ret = intel_init(sdw);
	if (ret) {
		dev_err(dev, "%s failed: %d", __func__, ret);
		return ret;
	}

	/*
	 * make sure all Slaves are tagged as UNATTACHED and provide
	 * reason for reinitialization
	 */
	sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);

	ret = sdw_cdns_enable_interrupt(cdns, true);
	if (ret < 0) {
		dev_err(dev, "cannot enable interrupts during resume\n");
		return ret;
	}

	/*
	 * follow recommended programming flows to avoid timeouts when
	 * gsync is enabled
	 */
	if (multi_link)
		intel_shim_sync_arm(sdw);

	ret = sdw_cdns_init(&sdw->cdns);
	if (ret < 0) {
		dev_err(dev, "unable to initialize Cadence IP during resume\n");
		return ret;
	}

	ret = sdw_cdns_exit_reset(cdns);
	if (ret < 0) {
		dev_err(dev, "unable to exit bus reset sequence during resume\n");
		return ret;
	}

	if (multi_link) {
		ret = intel_shim_sync_go(sdw);
		if (ret < 0) {
			dev_err(dev, "sync go failed during resume\n");
			return ret;
		}
	}

	/*
	 * after system resume, the pm_runtime suspend() may kick in
	 * during the enumeration, before any child device forces the
	 * master device to remain active. Using pm_runtime_get()
	 * routines is not really possible, since it'd prevent the
	 * master from suspending.
	 * A reasonable compromise is to update the pm_runtime counters
	 * and delay the pm_runtime suspend by several seconds, by when
	 * all enumeration should be complete.
	 */
	pm_runtime_mark_last_busy(dev);

	return ret;
}
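
/*
 * Runtime resume handler: mirror of intel_suspend_runtime(). The restore
 * sequence depends on whether the link was fully torn down
 * (CLK_STOP_TEARDOWN), left in clock-stop with a possible bus reset
 * (CLK_STOP_BUS_RESET), or left in plain clock-stop mode (no quirk).
 */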
static int intel_resume_runtime(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	u32 clock_stop_quirks;
	bool clock_stop0;
	int link_flags;
	bool multi_link;
	int status;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	link_flags = md_flags >> (bus->link_id * 8);
	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);

	clock_stop_quirks = sdw->link_res->clock_stop_quirks;

	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
		ret = intel_init(sdw);
		if (ret) {
			dev_err(dev, "%s failed: %d", __func__, ret);
			return ret;
		}

		/*
		 * make sure all Slaves are tagged as UNATTACHED and provide
		 * reason for reinitialization
		 */
		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);

		ret = sdw_cdns_enable_interrupt(cdns, true);
		if (ret < 0) {
			dev_err(dev, "cannot enable interrupts during resume\n");
			return ret;
		}

		/*
		 * follow recommended programming flows to avoid
		 * timeouts when gsync is enabled
		 */
		if (multi_link)
			intel_shim_sync_arm(sdw);

		ret = sdw_cdns_init(&sdw->cdns);
		if (ret < 0) {
			dev_err(dev, "unable to initialize Cadence IP during resume\n");
			return ret;
		}

		ret = sdw_cdns_exit_reset(cdns);
		if (ret < 0) {
			dev_err(dev, "unable to exit bus reset sequence during resume\n");
			return ret;
		}

		if (multi_link) {
			ret = intel_shim_sync_go(sdw);
			if (ret < 0) {
				dev_err(dev, "sync go failed during resume\n");
				return ret;
			}
		}
	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
		ret = intel_init(sdw);
		if (ret) {
			dev_err(dev, "%s failed: %d", __func__, ret);
			return ret;
		}

		/*
		 * An exception condition occurs for the CLK_STOP_BUS_RESET
		 * case if one or more masters remain active. In this condition,
		 * all the masters are powered on since they are in the same
		 * power domain. The Master can preserve its context for clock
		 * stop0, so there is no need to clear the Slave status and
		 * reset the bus.
		 */
		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);

		if (!clock_stop0) {
			/*
			 * make sure all Slaves are tagged as UNATTACHED and
			 * provide reason for reinitialization
			 */
			status = SDW_UNATTACH_REQUEST_MASTER_RESET;
			sdw_clear_slave_status(bus, status);

			ret = sdw_cdns_enable_interrupt(cdns, true);
			if (ret < 0) {
				dev_err(dev, "cannot enable interrupts during resume\n");
				return ret;
			}

			/*
			 * follow recommended programming flows to avoid
			 * timeouts when gsync is enabled
			 */
			if (multi_link)
				intel_shim_sync_arm(sdw);

			/*
			 * Re-initialize the IP since it was powered-off
			 */
			sdw_cdns_init(&sdw->cdns);

		} else {
			ret = sdw_cdns_enable_interrupt(cdns, true);
			if (ret < 0) {
				dev_err(dev, "cannot enable interrupts during resume\n");
				return ret;
			}
		}

		ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
		if (ret < 0) {
			dev_err(dev, "unable to restart clock during resume\n");
			return ret;
		}

		if (!clock_stop0) {
			ret = sdw_cdns_exit_reset(cdns);
			if (ret < 0) {
				dev_err(dev, "unable to exit bus reset sequence during resume\n");
				return ret;
			}

			if (multi_link) {
				ret = intel_shim_sync_go(sdw);
				if (ret < 0) {
					dev_err(sdw->cdns.dev, "sync go failed during resume\n");
					return ret;
				}
			}
		}
	} else if (!clock_stop_quirks) {
		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
		if (!clock_stop0)
			dev_err(dev, "%s invalid configuration, clock was not stopped", __func__);

		ret = intel_init(sdw);
		if (ret) {
			dev_err(dev, "%s failed: %d", __func__, ret);
			return ret;
		}

		ret = sdw_cdns_enable_interrupt(cdns, true);
		if (ret < 0) {
			dev_err(dev, "cannot enable interrupts during resume\n");
			return ret;
		}

		ret = sdw_cdns_clock_restart(cdns, false);
		if (ret < 0) {
			dev_err(dev, "unable to resume master during resume\n");
			return ret;
		}
	} else {
		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
			__func__, clock_stop_quirks);
		ret = -EINVAL;
	}

	return ret;
}

#endif
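
/*
 * Wire up both system sleep and runtime PM callbacks; the handlers above
 * are only built when CONFIG_PM is enabled.
 */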
static const struct dev_pm_ops intel_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
	SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
};
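
/* Platform driver bound to the "intel-sdw" devices, one per SoundWire link */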
static struct platform_driver sdw_intel_drv = {
	.probe = intel_master_probe,
	.remove = intel_master_remove,
	.driver = {
		.name = "intel-sdw",
		.pm = &intel_pm,
	}
};

module_platform_driver(sdw_intel_drv);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:intel-sdw");
MODULE_DESCRIPTION("Intel Soundwire Master Driver");