i3c-master-cdns.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2018 Cadence Design Systems Inc.
  4. *
  5. * Author: Boris Brezillon <boris.brezillon@bootlin.com>
  6. */
  7. #include <linux/bitops.h>
  8. #include <linux/clk.h>
  9. #include <linux/err.h>
  10. #include <linux/errno.h>
  11. #include <linux/i3c/master.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/io.h>
  14. #include <linux/iopoll.h>
  15. #include <linux/ioport.h>
  16. #include <linux/kernel.h>
  17. #include <linux/list.h>
  18. #include <linux/module.h>
  19. #include <linux/of.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/slab.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/workqueue.h>
  24. #include <linux/of_device.h>
  25. #define DEV_ID 0x0
  26. #define DEV_ID_I3C_MASTER 0x5034
  27. #define CONF_STATUS0 0x4
  28. #define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
  29. #define CONF_STATUS0_ECC_CHK BIT(28)
  30. #define CONF_STATUS0_INTEG_CHK BIT(27)
  31. #define CONF_STATUS0_CSR_DAP_CHK BIT(26)
  32. #define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
  33. #define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
  34. #define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
  35. #define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
  36. #define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7))
  37. #define CONF_STATUS0_SUPPORTS_DDR BIT(5)
  38. #define CONF_STATUS0_SEC_MASTER BIT(4)
  39. #define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
  40. #define CONF_STATUS1 0x8
  41. #define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
  42. #define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
  43. #define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
  44. #define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
  45. #define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
  46. #define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
  47. #define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
  48. #define REV_ID 0xc
  49. #define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
  50. #define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
  51. #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
  52. #define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
  53. #define CTRL 0x10
  54. #define CTRL_DEV_EN BIT(31)
  55. #define CTRL_HALT_EN BIT(30)
  56. #define CTRL_MCS BIT(29)
  57. #define CTRL_MCS_EN BIT(28)
  58. #define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
  59. #define CTRL_HJ_DISEC BIT(8)
  60. #define CTRL_MST_ACK BIT(7)
  61. #define CTRL_HJ_ACK BIT(6)
  62. #define CTRL_HJ_INIT BIT(5)
  63. #define CTRL_MST_INIT BIT(4)
  64. #define CTRL_AHDR_OPT BIT(3)
  65. #define CTRL_PURE_BUS_MODE 0
  66. #define CTRL_MIXED_FAST_BUS_MODE 2
  67. #define CTRL_MIXED_SLOW_BUS_MODE 3
  68. #define CTRL_BUS_MODE_MASK GENMASK(1, 0)
  69. #define THD_DELAY_MAX 3
  70. #define PRESCL_CTRL0 0x14
  71. #define PRESCL_CTRL0_I2C(x) ((x) << 16)
  72. #define PRESCL_CTRL0_I3C(x) (x)
  73. #define PRESCL_CTRL0_MAX GENMASK(9, 0)
  74. #define PRESCL_CTRL1 0x18
  75. #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
  76. #define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
  77. #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
  78. #define PRESCL_CTRL1_OD_LOW(x) (x)
  79. #define MST_IER 0x20
  80. #define MST_IDR 0x24
  81. #define MST_IMR 0x28
  82. #define MST_ICR 0x2c
  83. #define MST_ISR 0x30
  84. #define MST_INT_HALTED BIT(18)
  85. #define MST_INT_MR_DONE BIT(17)
  86. #define MST_INT_IMM_COMP BIT(16)
  87. #define MST_INT_TX_THR BIT(15)
  88. #define MST_INT_TX_OVF BIT(14)
  89. #define MST_INT_IBID_THR BIT(12)
  90. #define MST_INT_IBID_UNF BIT(11)
  91. #define MST_INT_IBIR_THR BIT(10)
  92. #define MST_INT_IBIR_UNF BIT(9)
  93. #define MST_INT_IBIR_OVF BIT(8)
  94. #define MST_INT_RX_THR BIT(7)
  95. #define MST_INT_RX_UNF BIT(6)
  96. #define MST_INT_CMDD_EMP BIT(5)
  97. #define MST_INT_CMDD_THR BIT(4)
  98. #define MST_INT_CMDD_OVF BIT(3)
  99. #define MST_INT_CMDR_THR BIT(2)
  100. #define MST_INT_CMDR_UNF BIT(1)
  101. #define MST_INT_CMDR_OVF BIT(0)
  102. #define MST_STATUS0 0x34
  103. #define MST_STATUS0_IDLE BIT(18)
  104. #define MST_STATUS0_HALTED BIT(17)
  105. #define MST_STATUS0_MASTER_MODE BIT(16)
  106. #define MST_STATUS0_TX_FULL BIT(13)
  107. #define MST_STATUS0_IBID_FULL BIT(12)
  108. #define MST_STATUS0_IBIR_FULL BIT(11)
  109. #define MST_STATUS0_RX_FULL BIT(10)
  110. #define MST_STATUS0_CMDD_FULL BIT(9)
  111. #define MST_STATUS0_CMDR_FULL BIT(8)
  112. #define MST_STATUS0_TX_EMP BIT(5)
  113. #define MST_STATUS0_IBID_EMP BIT(4)
  114. #define MST_STATUS0_IBIR_EMP BIT(3)
  115. #define MST_STATUS0_RX_EMP BIT(2)
  116. #define MST_STATUS0_CMDD_EMP BIT(1)
  117. #define MST_STATUS0_CMDR_EMP BIT(0)
  118. #define CMDR 0x38
  119. #define CMDR_NO_ERROR 0
  120. #define CMDR_DDR_PREAMBLE_ERROR 1
  121. #define CMDR_DDR_PARITY_ERROR 2
  122. #define CMDR_DDR_RX_FIFO_OVF 3
  123. #define CMDR_DDR_TX_FIFO_UNF 4
  124. #define CMDR_M0_ERROR 5
  125. #define CMDR_M1_ERROR 6
  126. #define CMDR_M2_ERROR 7
  127. #define CMDR_MST_ABORT 8
  128. #define CMDR_NACK_RESP 9
  129. #define CMDR_INVALID_DA 10
  130. #define CMDR_DDR_DROPPED 11
  131. #define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
  132. #define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
  133. #define CMDR_CMDID_HJACK_DISEC 0xfe
  134. #define CMDR_CMDID_HJACK_ENTDAA 0xff
  135. #define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
  136. #define IBIR 0x3c
  137. #define IBIR_ACKED BIT(12)
  138. #define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
  139. #define IBIR_ERROR BIT(7)
  140. #define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
  141. #define IBIR_TYPE_IBI 0
  142. #define IBIR_TYPE_HJ 1
  143. #define IBIR_TYPE_MR 2
  144. #define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
  145. #define SLV_IER 0x40
  146. #define SLV_IDR 0x44
  147. #define SLV_IMR 0x48
  148. #define SLV_ICR 0x4c
  149. #define SLV_ISR 0x50
  150. #define SLV_INT_TM BIT(20)
  151. #define SLV_INT_ERROR BIT(19)
  152. #define SLV_INT_EVENT_UP BIT(18)
  153. #define SLV_INT_HJ_DONE BIT(17)
  154. #define SLV_INT_MR_DONE BIT(16)
  155. #define SLV_INT_DA_UPD BIT(15)
  156. #define SLV_INT_SDR_FAIL BIT(14)
  157. #define SLV_INT_DDR_FAIL BIT(13)
  158. #define SLV_INT_M_RD_ABORT BIT(12)
  159. #define SLV_INT_DDR_RX_THR BIT(11)
  160. #define SLV_INT_DDR_TX_THR BIT(10)
  161. #define SLV_INT_SDR_RX_THR BIT(9)
  162. #define SLV_INT_SDR_TX_THR BIT(8)
  163. #define SLV_INT_DDR_RX_UNF BIT(7)
  164. #define SLV_INT_DDR_TX_OVF BIT(6)
  165. #define SLV_INT_SDR_RX_UNF BIT(5)
  166. #define SLV_INT_SDR_TX_OVF BIT(4)
  167. #define SLV_INT_DDR_RD_COMP BIT(3)
  168. #define SLV_INT_DDR_WR_COMP BIT(2)
  169. #define SLV_INT_SDR_RD_COMP BIT(1)
  170. #define SLV_INT_SDR_WR_COMP BIT(0)
  171. #define SLV_STATUS0 0x54
  172. #define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
  173. #define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
  174. #define SLV_STATUS1 0x58
  175. #define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
  176. #define SLV_STATUS1_VEN_TM BIT(19)
  177. #define SLV_STATUS1_HJ_DIS BIT(18)
  178. #define SLV_STATUS1_MR_DIS BIT(17)
  179. #define SLV_STATUS1_PROT_ERR BIT(16)
  180. #define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
  181. #define SLV_STATUS1_HAS_DA BIT(8)
  182. #define SLV_STATUS1_DDR_RX_FULL BIT(7)
  183. #define SLV_STATUS1_DDR_TX_FULL BIT(6)
  184. #define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
  185. #define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
  186. #define SLV_STATUS1_SDR_RX_FULL BIT(3)
  187. #define SLV_STATUS1_SDR_TX_FULL BIT(2)
  188. #define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
  189. #define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
  190. #define CMD0_FIFO 0x60
  191. #define CMD0_FIFO_IS_DDR BIT(31)
  192. #define CMD0_FIFO_IS_CCC BIT(30)
  193. #define CMD0_FIFO_BCH BIT(29)
  194. #define XMIT_BURST_STATIC_SUBADDR 0
  195. #define XMIT_SINGLE_INC_SUBADDR 1
  196. #define XMIT_SINGLE_STATIC_SUBADDR 2
  197. #define XMIT_BURST_WITHOUT_SUBADDR 3
  198. #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
  199. #define CMD0_FIFO_SBCA BIT(26)
  200. #define CMD0_FIFO_RSBC BIT(25)
  201. #define CMD0_FIFO_IS_10B BIT(24)
  202. #define CMD0_FIFO_PL_LEN(l) ((l) << 12)
  203. #define CMD0_FIFO_PL_LEN_MAX 4095
  204. #define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
  205. #define CMD0_FIFO_RNW BIT(0)
  206. #define CMD1_FIFO 0x64
  207. #define CMD1_FIFO_CMDID(id) ((id) << 24)
  208. #define CMD1_FIFO_CSRADDR(a) (a)
  209. #define CMD1_FIFO_CCC(id) (id)
  210. #define TX_FIFO 0x68
  211. #define IMD_CMD0 0x70
  212. #define IMD_CMD0_PL_LEN(l) ((l) << 12)
  213. #define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
  214. #define IMD_CMD0_RNW BIT(0)
  215. #define IMD_CMD1 0x74
  216. #define IMD_CMD1_CCC(id) (id)
  217. #define IMD_DATA 0x78
  218. #define RX_FIFO 0x80
  219. #define IBI_DATA_FIFO 0x84
  220. #define SLV_DDR_TX_FIFO 0x88
  221. #define SLV_DDR_RX_FIFO 0x8c
  222. #define CMD_IBI_THR_CTRL 0x90
  223. #define IBIR_THR(t) ((t) << 24)
  224. #define CMDR_THR(t) ((t) << 16)
  225. #define IBI_THR(t) ((t) << 8)
  226. #define CMD_THR(t) (t)
  227. #define TX_RX_THR_CTRL 0x94
  228. #define RX_THR(t) ((t) << 16)
  229. #define TX_THR(t) (t)
  230. #define SLV_DDR_TX_RX_THR_CTRL 0x98
  231. #define SLV_DDR_RX_THR(t) ((t) << 16)
  232. #define SLV_DDR_TX_THR(t) (t)
  233. #define FLUSH_CTRL 0x9c
  234. #define FLUSH_IBI_RESP BIT(23)
  235. #define FLUSH_CMD_RESP BIT(22)
  236. #define FLUSH_SLV_DDR_RX_FIFO BIT(22)
  237. #define FLUSH_SLV_DDR_TX_FIFO BIT(21)
  238. #define FLUSH_IMM_FIFO BIT(20)
  239. #define FLUSH_IBI_FIFO BIT(19)
  240. #define FLUSH_RX_FIFO BIT(18)
  241. #define FLUSH_TX_FIFO BIT(17)
  242. #define FLUSH_CMD_FIFO BIT(16)
  243. #define TTO_PRESCL_CTRL0 0xb0
  244. #define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
  245. #define TTO_PRESCL_CTRL0_DIVA(x) (x)
  246. #define TTO_PRESCL_CTRL1 0xb4
  247. #define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
  248. #define TTO_PRESCL_CTRL1_DIVA(x) (x)
  249. #define DEVS_CTRL 0xb8
  250. #define DEVS_CTRL_DEV_CLR_SHIFT 16
  251. #define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
  252. #define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
  253. #define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
  254. #define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
  255. #define MAX_DEVS 16
  256. #define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
  257. #define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
  258. #define DEV_ID_RR0_HDR_CAP BIT(10)
  259. #define DEV_ID_RR0_IS_I3C BIT(9)
  260. #define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
  261. #define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
  262. (((a) & GENMASK(9, 7)) << 6))
  263. #define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
  264. (((x) >> 6) & GENMASK(9, 7)))
  265. #define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
  266. #define DEV_ID_RR1_PID_MSB(pid) (pid)
  267. #define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
  268. #define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
  269. #define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
  270. #define DEV_ID_RR2_DCR(dcr) (dcr)
  271. #define DEV_ID_RR2_LVR(lvr) (lvr)
  272. #define SIR_MAP(x) (0x180 + ((x) * 4))
  273. #define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
  274. #define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
  275. #define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
  276. #define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
  277. #define DEV_ROLE_SLAVE 0
  278. #define DEV_ROLE_MASTER 1
  279. #define SIR_MAP_DEV_ROLE(role) ((role) << 14)
  280. #define SIR_MAP_DEV_SLOW BIT(13)
  281. #define SIR_MAP_DEV_PL(l) ((l) << 8)
  282. #define SIR_MAP_PL_MAX GENMASK(4, 0)
  283. #define SIR_MAP_DEV_DA(a) ((a) << 1)
  284. #define SIR_MAP_DEV_ACK BIT(0)
  285. #define GPIR_WORD(x) (0x200 + ((x) * 4))
  286. #define GPI_REG(val, id) \
  287. (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
  288. #define GPOR_WORD(x) (0x220 + ((x) * 4))
  289. #define GPO_REG(val, id) \
  290. (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
  291. #define ASF_INT_STATUS 0x300
  292. #define ASF_INT_RAW_STATUS 0x304
  293. #define ASF_INT_MASK 0x308
  294. #define ASF_INT_TEST 0x30c
  295. #define ASF_INT_FATAL_SELECT 0x310
  296. #define ASF_INTEGRITY_ERR BIT(6)
  297. #define ASF_PROTOCOL_ERR BIT(5)
  298. #define ASF_TRANS_TIMEOUT_ERR BIT(4)
  299. #define ASF_CSR_ERR BIT(3)
  300. #define ASF_DAP_ERR BIT(2)
  301. #define ASF_SRAM_UNCORR_ERR BIT(1)
  302. #define ASF_SRAM_CORR_ERR BIT(0)
  303. #define ASF_SRAM_CORR_FAULT_STATUS 0x320
  304. #define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
  305. #define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
  306. #define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
  307. #define ASF_SRAM_FAULT_STATS 0x328
  308. #define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
  309. #define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
  310. #define ASF_TRANS_TOUT_CTRL 0x330
  311. #define ASF_TRANS_TOUT_EN BIT(31)
  312. #define ASF_TRANS_TOUT_VAL(x) (x)
  313. #define ASF_TRANS_TOUT_FAULT_MASK 0x334
  314. #define ASF_TRANS_TOUT_FAULT_STATUS 0x338
  315. #define ASF_TRANS_TOUT_FAULT_APB BIT(3)
  316. #define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
  317. #define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
  318. #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
  319. #define ASF_PROTO_FAULT_MASK 0x340
  320. #define ASF_PROTO_FAULT_STATUS 0x344
  321. #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
  322. #define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
  323. #define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
  324. #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
  325. #define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
  326. #define ASF_PROTO_FAULT_M(x) BIT(x)
  327. struct cdns_i3c_master_caps {
  328. u32 cmdfifodepth;
  329. u32 cmdrfifodepth;
  330. u32 txfifodepth;
  331. u32 rxfifodepth;
  332. u32 ibirfifodepth;
  333. };
  334. struct cdns_i3c_cmd {
  335. u32 cmd0;
  336. u32 cmd1;
  337. u32 tx_len;
  338. const void *tx_buf;
  339. u32 rx_len;
  340. void *rx_buf;
  341. u32 error;
  342. };
  343. struct cdns_i3c_xfer {
  344. struct list_head node;
  345. struct completion comp;
  346. int ret;
  347. unsigned int ncmds;
  348. struct cdns_i3c_cmd cmds[];
  349. };
  350. struct cdns_i3c_data {
  351. u8 thd_delay_ns;
  352. };
  353. struct cdns_i3c_master {
  354. struct work_struct hj_work;
  355. struct i3c_master_controller base;
  356. u32 free_rr_slots;
  357. unsigned int maxdevs;
  358. struct {
  359. unsigned int num_slots;
  360. struct i3c_dev_desc **slots;
  361. spinlock_t lock;
  362. } ibi;
  363. struct {
  364. struct list_head list;
  365. struct cdns_i3c_xfer *cur;
  366. spinlock_t lock;
  367. } xferqueue;
  368. void __iomem *regs;
  369. struct clk *sysclk;
  370. struct clk *pclk;
  371. struct cdns_i3c_master_caps caps;
  372. unsigned long i3c_scl_lim;
  373. const struct cdns_i3c_data *devdata;
  374. };
  375. static inline struct cdns_i3c_master *
  376. to_cdns_i3c_master(struct i3c_master_controller *master)
  377. {
  378. return container_of(master, struct cdns_i3c_master, base);
  379. }
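/*
 * Data FIFO helpers: the TX and RX FIFOs are accessed one 32-bit word at a
 * time, so the two helpers below move full words with writesl()/readsl() and
 * handle the trailing 1-3 bytes through a temporary word.
 */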
  380. static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
  381. const u8 *bytes, int nbytes)
  382. {
  383. writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
  384. if (nbytes & 3) {
  385. u32 tmp = 0;
  386. memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
  387. writesl(master->regs + TX_FIFO, &tmp, 1);
  388. }
  389. }
  390. static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
  391. u8 *bytes, int nbytes)
  392. {
  393. readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
  394. if (nbytes & 3) {
  395. u32 tmp;
  396. readsl(master->regs + RX_FIFO, &tmp, 1);
  397. memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
  398. }
  399. }
  400. static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
  401. const struct i3c_ccc_cmd *cmd)
  402. {
  403. if (cmd->ndests > 1)
  404. return false;
  405. switch (cmd->id) {
  406. case I3C_CCC_ENEC(true):
  407. case I3C_CCC_ENEC(false):
  408. case I3C_CCC_DISEC(true):
  409. case I3C_CCC_DISEC(false):
  410. case I3C_CCC_ENTAS(0, true):
  411. case I3C_CCC_ENTAS(0, false):
  412. case I3C_CCC_RSTDAA(true):
  413. case I3C_CCC_RSTDAA(false):
  414. case I3C_CCC_ENTDAA:
  415. case I3C_CCC_SETMWL(true):
  416. case I3C_CCC_SETMWL(false):
  417. case I3C_CCC_SETMRL(true):
  418. case I3C_CCC_SETMRL(false):
  419. case I3C_CCC_DEFSLVS:
  420. case I3C_CCC_ENTHDR(0):
  421. case I3C_CCC_SETDASA:
  422. case I3C_CCC_SETNEWDA:
  423. case I3C_CCC_GETMWL:
  424. case I3C_CCC_GETMRL:
  425. case I3C_CCC_GETPID:
  426. case I3C_CCC_GETBCR:
  427. case I3C_CCC_GETDCR:
  428. case I3C_CCC_GETSTATUS:
  429. case I3C_CCC_GETACCMST:
  430. case I3C_CCC_GETMXDS:
  431. case I3C_CCC_GETHDRCAP:
  432. return true;
  433. default:
  434. break;
  435. }
  436. return false;
  437. }
  438. static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
  439. {
  440. u32 status;
  441. writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
  442. return readl_poll_timeout(master->regs + MST_STATUS0, status,
  443. status & MST_STATUS0_IDLE, 10, 1000000);
  444. }
  445. static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
  446. {
  447. writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
  448. }
  449. static struct cdns_i3c_xfer *
  450. cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
  451. {
  452. struct cdns_i3c_xfer *xfer;
  453. xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
  454. if (!xfer)
  455. return NULL;
  456. INIT_LIST_HEAD(&xfer->node);
  457. xfer->ncmds = ncmds;
  458. xfer->ret = -ETIMEDOUT;
  459. return xfer;
  460. }
  461. static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
  462. {
  463. kfree(xfer);
  464. }
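/*
 * Transfer queue model: only one cdns_i3c_xfer is active at a time
 * (xferqueue.cur); the others wait on xferqueue.list. Starting a transfer
 * pushes all TX payloads to the TX FIFO, queues one CMD1/CMD0 descriptor
 * pair per command, and kicks the controller with CTRL_MCS. Completion is
 * signalled by the MST_INT_CMDD_EMP interrupt and handled in
 * cdns_i3c_master_end_xfer_locked().
 */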
  465. static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
  466. {
  467. struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
  468. unsigned int i;
  469. if (!xfer)
  470. return;
  471. writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
  472. for (i = 0; i < xfer->ncmds; i++) {
  473. struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
  474. cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
  475. cmd->tx_len);
  476. }
  477. for (i = 0; i < xfer->ncmds; i++) {
  478. struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
  479. writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
  480. master->regs + CMD1_FIFO);
  481. writel(cmd->cmd0, master->regs + CMD0_FIFO);
  482. }
  483. writel(readl(master->regs + CTRL) | CTRL_MCS,
  484. master->regs + CTRL);
  485. writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
  486. }
  487. static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
  488. u32 isr)
  489. {
  490. struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
  491. int i, ret = 0;
  492. u32 status0;
  493. if (!xfer)
  494. return;
  495. if (!(isr & MST_INT_CMDD_EMP))
  496. return;
  497. writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
  498. for (status0 = readl(master->regs + MST_STATUS0);
  499. !(status0 & MST_STATUS0_CMDR_EMP);
  500. status0 = readl(master->regs + MST_STATUS0)) {
  501. struct cdns_i3c_cmd *cmd;
  502. u32 cmdr, rx_len, id;
  503. cmdr = readl(master->regs + CMDR);
  504. id = CMDR_CMDID(cmdr);
  505. if (id == CMDR_CMDID_HJACK_DISEC ||
  506. id == CMDR_CMDID_HJACK_ENTDAA ||
  507. WARN_ON(id >= xfer->ncmds))
  508. continue;
  509. cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
  510. rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
  511. cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
  512. cmd->error = CMDR_ERROR(cmdr);
  513. }
  514. for (i = 0; i < xfer->ncmds; i++) {
  515. switch (xfer->cmds[i].error) {
  516. case CMDR_NO_ERROR:
  517. break;
  518. case CMDR_DDR_PREAMBLE_ERROR:
  519. case CMDR_DDR_PARITY_ERROR:
  520. case CMDR_M0_ERROR:
  521. case CMDR_M1_ERROR:
  522. case CMDR_M2_ERROR:
  523. case CMDR_MST_ABORT:
  524. case CMDR_NACK_RESP:
  525. case CMDR_DDR_DROPPED:
  526. ret = -EIO;
  527. break;
  528. case CMDR_DDR_RX_FIFO_OVF:
  529. case CMDR_DDR_TX_FIFO_UNF:
  530. ret = -ENOSPC;
  531. break;
  532. case CMDR_INVALID_DA:
  533. default:
  534. ret = -EINVAL;
  535. break;
  536. }
  537. }
  538. xfer->ret = ret;
  539. complete(&xfer->comp);
  540. xfer = list_first_entry_or_null(&master->xferqueue.list,
  541. struct cdns_i3c_xfer, node);
  542. if (xfer)
  543. list_del_init(&xfer->node);
  544. master->xferqueue.cur = xfer;
  545. cdns_i3c_master_start_xfer_locked(master);
  546. }
  547. static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
  548. struct cdns_i3c_xfer *xfer)
  549. {
  550. unsigned long flags;
  551. init_completion(&xfer->comp);
  552. spin_lock_irqsave(&master->xferqueue.lock, flags);
  553. if (master->xferqueue.cur) {
  554. list_add_tail(&xfer->node, &master->xferqueue.list);
  555. } else {
  556. master->xferqueue.cur = xfer;
  557. cdns_i3c_master_start_xfer_locked(master);
  558. }
  559. spin_unlock_irqrestore(&master->xferqueue.lock, flags);
  560. }
  561. static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
  562. struct cdns_i3c_xfer *xfer)
  563. {
  564. unsigned long flags;
  565. spin_lock_irqsave(&master->xferqueue.lock, flags);
  566. if (master->xferqueue.cur == xfer) {
  567. u32 status;
  568. writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
  569. master->regs + CTRL);
  570. readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
  571. status & MST_STATUS0_IDLE, 10,
  572. 1000000);
  573. master->xferqueue.cur = NULL;
  574. writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
  575. FLUSH_CMD_RESP,
  576. master->regs + FLUSH_CTRL);
  577. writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
  578. writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
  579. master->regs + CTRL);
  580. } else {
  581. list_del_init(&xfer->node);
  582. }
  583. spin_unlock_irqrestore(&master->xferqueue.lock, flags);
  584. }
  585. static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
  586. {
  587. switch (cmd->error) {
  588. case CMDR_M0_ERROR:
  589. return I3C_ERROR_M0;
  590. case CMDR_M1_ERROR:
  591. return I3C_ERROR_M1;
  592. case CMDR_M2_ERROR:
  593. case CMDR_NACK_RESP:
  594. return I3C_ERROR_M2;
  595. default:
  596. break;
  597. }
  598. return I3C_ERROR_UNKNOWN;
  599. }
  600. static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
  601. struct i3c_ccc_cmd *cmd)
  602. {
  603. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  604. struct cdns_i3c_xfer *xfer;
  605. struct cdns_i3c_cmd *ccmd;
  606. int ret;
  607. xfer = cdns_i3c_master_alloc_xfer(master, 1);
  608. if (!xfer)
  609. return -ENOMEM;
  610. ccmd = xfer->cmds;
  611. ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
  612. ccmd->cmd0 = CMD0_FIFO_IS_CCC |
  613. CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
  614. if (cmd->id & I3C_CCC_DIRECT)
  615. ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
  616. if (cmd->rnw) {
  617. ccmd->cmd0 |= CMD0_FIFO_RNW;
  618. ccmd->rx_buf = cmd->dests[0].payload.data;
  619. ccmd->rx_len = cmd->dests[0].payload.len;
  620. } else {
  621. ccmd->tx_buf = cmd->dests[0].payload.data;
  622. ccmd->tx_len = cmd->dests[0].payload.len;
  623. }
  624. cdns_i3c_master_queue_xfer(master, xfer);
  625. if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
  626. cdns_i3c_master_unqueue_xfer(master, xfer);
  627. ret = xfer->ret;
  628. cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
  629. cdns_i3c_master_free_xfer(xfer);
  630. return ret;
  631. }
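/*
 * Private SDR transfers: each i3c_priv_xfer becomes one command descriptor.
 * All but the last command get CMD0_FIFO_RSBC and the first one gets
 * CMD0_FIFO_BCH which, going by the flag names, request a repeated start
 * between commands and a broadcast header on the first command, so the whole
 * set is emitted as a single bus transaction.
 */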
  632. static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
  633. struct i3c_priv_xfer *xfers,
  634. int nxfers)
  635. {
  636. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  637. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  638. int txslots = 0, rxslots = 0, i, ret;
  639. struct cdns_i3c_xfer *cdns_xfer;
  640. for (i = 0; i < nxfers; i++) {
  641. if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
  642. return -ENOTSUPP;
  643. }
  644. if (!nxfers)
  645. return 0;
  646. if (nxfers > master->caps.cmdfifodepth ||
  647. nxfers > master->caps.cmdrfifodepth)
  648. return -ENOTSUPP;
  649. /*
  650. * First make sure that all transactions (block of transfers separated
  651. * by a STOP marker) fit in the FIFOs.
  652. */
  653. for (i = 0; i < nxfers; i++) {
  654. if (xfers[i].rnw)
  655. rxslots += DIV_ROUND_UP(xfers[i].len, 4);
  656. else
  657. txslots += DIV_ROUND_UP(xfers[i].len, 4);
  658. }
  659. if (rxslots > master->caps.rxfifodepth ||
  660. txslots > master->caps.txfifodepth)
  661. return -ENOTSUPP;
  662. cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
  663. if (!cdns_xfer)
  664. return -ENOMEM;
  665. for (i = 0; i < nxfers; i++) {
  666. struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
  667. u32 pl_len = xfers[i].len;
  668. ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
  669. CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
  670. if (xfers[i].rnw) {
  671. ccmd->cmd0 |= CMD0_FIFO_RNW;
  672. ccmd->rx_buf = xfers[i].data.in;
  673. ccmd->rx_len = xfers[i].len;
  674. pl_len++;
  675. } else {
  676. ccmd->tx_buf = xfers[i].data.out;
  677. ccmd->tx_len = xfers[i].len;
  678. }
  679. ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
  680. if (i < nxfers - 1)
  681. ccmd->cmd0 |= CMD0_FIFO_RSBC;
  682. if (!i)
  683. ccmd->cmd0 |= CMD0_FIFO_BCH;
  684. }
  685. cdns_i3c_master_queue_xfer(master, cdns_xfer);
  686. if (!wait_for_completion_timeout(&cdns_xfer->comp,
  687. msecs_to_jiffies(1000)))
  688. cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
  689. ret = cdns_xfer->ret;
  690. for (i = 0; i < nxfers; i++)
  691. xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
  692. cdns_i3c_master_free_xfer(cdns_xfer);
  693. return ret;
  694. }
  695. static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
  696. const struct i2c_msg *xfers, int nxfers)
  697. {
  698. struct i3c_master_controller *m = i2c_dev_get_master(dev);
  699. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  700. unsigned int nrxwords = 0, ntxwords = 0;
  701. struct cdns_i3c_xfer *xfer;
  702. int i, ret = 0;
  703. if (nxfers > master->caps.cmdfifodepth)
  704. return -ENOTSUPP;
  705. for (i = 0; i < nxfers; i++) {
  706. if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
  707. return -ENOTSUPP;
  708. if (xfers[i].flags & I2C_M_RD)
  709. nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
  710. else
  711. ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
  712. }
  713. if (ntxwords > master->caps.txfifodepth ||
  714. nrxwords > master->caps.rxfifodepth)
  715. return -ENOTSUPP;
  716. xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
  717. if (!xfer)
  718. return -ENOMEM;
  719. for (i = 0; i < nxfers; i++) {
  720. struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
  721. ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
  722. CMD0_FIFO_PL_LEN(xfers[i].len) |
  723. CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
  724. if (xfers[i].flags & I2C_M_TEN)
  725. ccmd->cmd0 |= CMD0_FIFO_IS_10B;
  726. if (xfers[i].flags & I2C_M_RD) {
  727. ccmd->cmd0 |= CMD0_FIFO_RNW;
  728. ccmd->rx_buf = xfers[i].buf;
  729. ccmd->rx_len = xfers[i].len;
  730. } else {
  731. ccmd->tx_buf = xfers[i].buf;
  732. ccmd->tx_len = xfers[i].len;
  733. }
  734. }
  735. cdns_i3c_master_queue_xfer(master, xfer);
  736. if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
  737. cdns_i3c_master_unqueue_xfer(master, xfer);
  738. ret = xfer->ret;
  739. cdns_i3c_master_free_xfer(xfer);
  740. return ret;
  741. }
  742. struct cdns_i3c_i2c_dev_data {
  743. u16 id;
  744. s16 ibi;
  745. struct i3c_generic_ibi_pool *ibi_pool;
  746. };
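/*
 * Retaining Register 0 (RR0) encoding: the 7-bit device address goes in
 * bits [7:1], extended address bits [9:7] go in bits [15:13], and bit [0]
 * is set when addr[6:0] contains an even number of ones, giving odd parity
 * over the resulting byte.
 */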
  747. static u32 prepare_rr0_dev_address(u32 addr)
  748. {
  749. u32 ret = (addr << 1) & 0xff;
  750. /* RR0[7:1] = addr[6:0] */
  751. ret |= (addr & GENMASK(6, 0)) << 1;
  752. /* RR0[15:13] = addr[9:7] */
  753. ret |= (addr & GENMASK(9, 7)) << 6;
  754. /* RR0[0] = ~XOR(addr[6:0]) */
  755. if (!(hweight8(addr & 0x7f) & 1))
  756. ret |= 1;
  757. return ret;
  758. }
  759. static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
  760. {
  761. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  762. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  763. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  764. u32 rr;
  765. rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
  766. dev->info.dyn_addr :
  767. dev->info.static_addr);
  768. writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
  769. }
  770. static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
  771. u8 dyn_addr)
  772. {
  773. unsigned long activedevs;
  774. u32 rr;
  775. int i;
  776. if (!dyn_addr) {
  777. if (!master->free_rr_slots)
  778. return -ENOSPC;
  779. return ffs(master->free_rr_slots) - 1;
  780. }
  781. activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
  782. activedevs &= ~BIT(0);
  783. for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
  784. rr = readl(master->regs + DEV_ID_RR0(i));
  785. if (!(rr & DEV_ID_RR0_IS_I3C) ||
  786. DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
  787. continue;
  788. return i;
  789. }
  790. return -EINVAL;
  791. }
  792. static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
  793. u8 old_dyn_addr)
  794. {
  795. cdns_i3c_master_upd_i3c_addr(dev);
  796. return 0;
  797. }
  798. static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
  799. {
  800. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  801. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  802. struct cdns_i3c_i2c_dev_data *data;
  803. int slot;
  804. data = kzalloc(sizeof(*data), GFP_KERNEL);
  805. if (!data)
  806. return -ENOMEM;
  807. slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
  808. if (slot < 0) {
  809. kfree(data);
  810. return slot;
  811. }
  812. data->ibi = -1;
  813. data->id = slot;
  814. i3c_dev_set_master_data(dev, data);
  815. master->free_rr_slots &= ~BIT(slot);
  816. if (!dev->info.dyn_addr) {
  817. cdns_i3c_master_upd_i3c_addr(dev);
  818. writel(readl(master->regs + DEVS_CTRL) |
  819. DEVS_CTRL_DEV_ACTIVE(data->id),
  820. master->regs + DEVS_CTRL);
  821. }
  822. return 0;
  823. }
  824. static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
  825. {
  826. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  827. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  828. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  829. writel(readl(master->regs + DEVS_CTRL) |
  830. DEVS_CTRL_DEV_CLR(data->id),
  831. master->regs + DEVS_CTRL);
  832. i3c_dev_set_master_data(dev, NULL);
  833. master->free_rr_slots |= BIT(data->id);
  834. kfree(data);
  835. }
  836. static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
  837. {
  838. struct i3c_master_controller *m = i2c_dev_get_master(dev);
  839. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  840. struct cdns_i3c_i2c_dev_data *data;
  841. int slot;
  842. slot = cdns_i3c_master_get_rr_slot(master, 0);
  843. if (slot < 0)
  844. return slot;
  845. data = kzalloc(sizeof(*data), GFP_KERNEL);
  846. if (!data)
  847. return -ENOMEM;
  848. data->id = slot;
  849. master->free_rr_slots &= ~BIT(slot);
  850. i2c_dev_set_master_data(dev, data);
  851. writel(prepare_rr0_dev_address(dev->addr),
  852. master->regs + DEV_ID_RR0(data->id));
  853. writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
  854. writel(readl(master->regs + DEVS_CTRL) |
  855. DEVS_CTRL_DEV_ACTIVE(data->id),
  856. master->regs + DEVS_CTRL);
  857. return 0;
  858. }
  859. static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
  860. {
  861. struct i3c_master_controller *m = i2c_dev_get_master(dev);
  862. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  863. struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
  864. writel(readl(master->regs + DEVS_CTRL) |
  865. DEVS_CTRL_DEV_CLR(data->id),
  866. master->regs + DEVS_CTRL);
  867. master->free_rr_slots |= BIT(data->id);
  868. i2c_dev_set_master_data(dev, NULL);
  869. kfree(data);
  870. }
  871. static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
  872. {
  873. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  874. cdns_i3c_master_disable(master);
  875. }
  876. static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
  877. unsigned int slot,
  878. struct i3c_device_info *info)
  879. {
  880. u32 rr;
  881. memset(info, 0, sizeof(*info));
  882. rr = readl(master->regs + DEV_ID_RR0(slot));
  883. info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
  884. rr = readl(master->regs + DEV_ID_RR2(slot));
  885. info->dcr = rr;
  886. info->bcr = rr >> 8;
  887. info->pid = rr >> 16;
  888. info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
  889. }
  890. static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
  891. {
  892. struct i3c_master_controller *m = &master->base;
  893. unsigned long i3c_lim_period, pres_step, ncycles;
  894. struct i3c_bus *bus = i3c_master_get_bus(m);
  895. unsigned long new_i3c_scl_lim = 0;
  896. struct i3c_dev_desc *dev;
  897. u32 prescl1, ctrl;
  898. i3c_bus_for_each_i3cdev(bus, dev) {
  899. unsigned long max_fscl;
  900. max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
  901. I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
  902. switch (max_fscl) {
  903. case I3C_SDR1_FSCL_8MHZ:
  904. max_fscl = 8000000;
  905. break;
  906. case I3C_SDR2_FSCL_6MHZ:
  907. max_fscl = 6000000;
  908. break;
  909. case I3C_SDR3_FSCL_4MHZ:
  910. max_fscl = 4000000;
  911. break;
  912. case I3C_SDR4_FSCL_2MHZ:
  913. max_fscl = 2000000;
  914. break;
  915. case I3C_SDR0_FSCL_MAX:
  916. default:
  917. max_fscl = 0;
  918. break;
  919. }
  920. if (max_fscl &&
  921. (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
  922. new_i3c_scl_lim = max_fscl;
  923. }
  924. /* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
  925. if (new_i3c_scl_lim == master->i3c_scl_lim)
  926. return;
  927. master->i3c_scl_lim = new_i3c_scl_lim;
  928. if (!new_i3c_scl_lim)
  929. return;
  930. pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
  931. /* Configure PP_LOW to meet I3C slave limitations. */
  932. prescl1 = readl(master->regs + PRESCL_CTRL1) &
  933. ~PRESCL_CTRL1_PP_LOW_MASK;
  934. ctrl = readl(master->regs + CTRL);
  935. i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
  936. ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
  937. if (ncycles < 4)
  938. ncycles = 0;
  939. else
  940. ncycles -= 4;
  941. prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
  942. /* Disable I3C master before updating PRESCL_CTRL1. */
  943. if (ctrl & CTRL_DEV_EN)
  944. cdns_i3c_master_disable(master);
  945. writel(prescl1, master->regs + PRESCL_CTRL1);
  946. if (ctrl & CTRL_DEV_EN)
  947. cdns_i3c_master_enable(master);
  948. }
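/*
 * DAA flow: free RR slots are pre-programmed with fresh dynamic addresses
 * before ENTDAA is issued, so the controller can hand them out on its own.
 * Slots that became active during DAA are then registered with the core,
 * while slots that ended up unused are cleared again.
 */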
  949. static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
  950. {
  951. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  952. unsigned long olddevs, newdevs;
  953. int ret, slot;
  954. u8 addrs[MAX_DEVS] = { };
  955. u8 last_addr = 0;
  956. olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
  957. olddevs |= BIT(0);
  958. /* Prepare RR slots before launching DAA. */
  959. for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
  960. ret = i3c_master_get_free_addr(m, last_addr + 1);
  961. if (ret < 0)
  962. return -ENOSPC;
  963. last_addr = ret;
  964. addrs[slot] = last_addr;
  965. writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
  966. master->regs + DEV_ID_RR0(slot));
  967. writel(0, master->regs + DEV_ID_RR1(slot));
  968. writel(0, master->regs + DEV_ID_RR2(slot));
  969. }
  970. ret = i3c_master_entdaa_locked(&master->base);
  971. if (ret && ret != I3C_ERROR_M2)
  972. return ret;
  973. newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
  974. newdevs &= ~olddevs;
  975. /*
  976. * Register the devices that got a dynamic address during DAA. We
  977. * already have the addresses assigned to them in the addrs array.
  978. */
  979. for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
  980. i3c_master_add_i3c_dev_locked(m, addrs[slot]);
  981. /*
  982. * Clear slots that ended up not being used. Can be caused by I3C
  983. * device creation failure or when the I3C device was already known
  984. * by the system but with a different address (in this case the device
  985. * already has a slot and does not need a new one).
  986. */
  987. writel(readl(master->regs + DEVS_CTRL) |
  988. master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
  989. master->regs + DEVS_CTRL);
  990. i3c_master_defslvs_locked(&master->base);
  991. cdns_i3c_master_upd_i3c_scl_lim(master);
  992. /* Unmask Hot-Join and Mastership request interrupts. */
  993. i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
  994. I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
  995. return 0;
  996. }
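/*
 * Convert the device-specific tHD_PP delay (in ns) into sysclk cycles,
 * clamped to THD_DELAY_MAX, and return it in the inverted form
 * (THD_DELAY_MAX - cycles) that is programmed into the CTRL_THD_DELAY field.
 */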
  997. static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
  998. {
  999. unsigned long sysclk_rate = clk_get_rate(master->sysclk);
  1000. u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
  1001. (NSEC_PER_SEC / sysclk_rate));
  1002. /* Values greater than THD_DELAY_MAX (3) are not valid. */
  1003. if (thd_delay > THD_DELAY_MAX)
  1004. thd_delay = THD_DELAY_MAX;
  1005. /* The CTRL_THD_DELAY field is encoded: write THD_DELAY_MAX - delay. */
  1006. return (THD_DELAY_MAX - thd_delay);
  1007. }
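/*
 * Prescaler programming (as read from the code, not a datasheet): the I3C
 * SCL rate ends up as sysclk / ((pres + 1) * 4) and the I2C rate as
 * sysclk / ((pres + 1) * 5), with both prescalers packed into PRESCL_CTRL0.
 * PRESCL_CTRL1 stretches the open-drain low phase (and, when a slave limits
 * the SCL rate, the push-pull low phase) in prescaler steps.
 */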
  1008. static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
  1009. {
  1010. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  1011. unsigned long pres_step, sysclk_rate, max_i2cfreq;
  1012. struct i3c_bus *bus = i3c_master_get_bus(m);
  1013. u32 ctrl, prescl0, prescl1, pres, low;
  1014. struct i3c_device_info info = { };
  1015. int ret, ncycles;
  1016. switch (bus->mode) {
  1017. case I3C_BUS_MODE_PURE:
  1018. ctrl = CTRL_PURE_BUS_MODE;
  1019. break;
  1020. case I3C_BUS_MODE_MIXED_FAST:
  1021. ctrl = CTRL_MIXED_FAST_BUS_MODE;
  1022. break;
  1023. case I3C_BUS_MODE_MIXED_SLOW:
  1024. ctrl = CTRL_MIXED_SLOW_BUS_MODE;
  1025. break;
  1026. default:
  1027. return -EINVAL;
  1028. }
  1029. sysclk_rate = clk_get_rate(master->sysclk);
  1030. if (!sysclk_rate)
  1031. return -EINVAL;
  1032. pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
  1033. if (pres > PRESCL_CTRL0_MAX)
  1034. return -ERANGE;
  1035. bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
  1036. prescl0 = PRESCL_CTRL0_I3C(pres);
  1037. low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
  1038. prescl1 = PRESCL_CTRL1_OD_LOW(low);
  1039. max_i2cfreq = bus->scl_rate.i2c;
  1040. pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
  1041. if (pres > PRESCL_CTRL0_MAX)
  1042. return -ERANGE;
  1043. bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
  1044. prescl0 |= PRESCL_CTRL0_I2C(pres);
  1045. writel(prescl0, master->regs + PRESCL_CTRL0);
  1046. /* Calculate OD and PP low. */
  1047. pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
  1048. ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
  1049. if (ncycles < 0)
  1050. ncycles = 0;
  1051. prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
  1052. writel(prescl1, master->regs + PRESCL_CTRL1);
  1053. /* Get an address for the master. */
  1054. ret = i3c_master_get_free_addr(m, 0);
  1055. if (ret < 0)
  1056. return ret;
  1057. writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
  1058. master->regs + DEV_ID_RR0(0));
  1059. cdns_i3c_master_dev_rr_to_info(master, 0, &info);
  1060. if (info.bcr & I3C_BCR_HDR_CAP)
  1061. info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
  1062. ret = i3c_master_set_info(&master->base, &info);
  1063. if (ret)
  1064. return ret;
  1065. /*
  1066. * Enable Hot-Join, and, when a Hot-Join request happens, disable all
  1067. * events coming from this device.
  1068. *
  1069. * We will issue ENTDAA afterwards from the Hot-Join work handler.
  1070. */
  1071. ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
  1072. /*
  1073. * Configure data hold delay based on device-specific data.
  1074. *
  1075. * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
  1076. * master output. This setting makes it possible to meet this timing on
  1077. * the master's SoC outputs, regardless of PCB balancing.
  1078. */
  1079. ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
  1080. writel(ctrl, master->regs + CTRL);
  1081. cdns_i3c_master_enable(master);
  1082. return 0;
  1083. }
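/*
 * IBI payload handling: the IBI data FIFO is drained into a slot taken from
 * the device's generic IBI pool and queued to the core. If no slot is
 * available, the slave is unknown, or the IBIR entry reports an error, the
 * payload is still read out and discarded so the FIFO stays in sync.
 */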
  1084. static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
  1085. u32 ibir)
  1086. {
  1087. struct cdns_i3c_i2c_dev_data *data;
  1088. bool data_consumed = false;
  1089. struct i3c_ibi_slot *slot;
  1090. u32 id = IBIR_SLVID(ibir);
  1091. struct i3c_dev_desc *dev;
  1092. size_t nbytes;
  1093. u8 *buf;
  1094. /*
  1095. * FIXME: maybe we should report the FIFO OVF errors to the upper
  1096. * layer.
  1097. */
  1098. if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
  1099. goto out;
  1100. dev = master->ibi.slots[id];
  1101. spin_lock(&master->ibi.lock);
  1102. data = i3c_dev_get_master_data(dev);
  1103. slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
  1104. if (!slot)
  1105. goto out_unlock;
  1106. buf = slot->data;
  1107. nbytes = IBIR_XFER_BYTES(ibir);
  1108. readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
  1109. if (nbytes & 3) {
  1110. u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
  1111. memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
  1112. }
  1113. slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
  1114. dev->ibi->max_payload_len);
  1115. i3c_master_queue_ibi(dev, slot);
  1116. data_consumed = true;
  1117. out_unlock:
  1118. spin_unlock(&master->ibi.lock);
  1119. out:
  1120. /* Consume data from the FIFO if it's not been done already. */
  1121. if (!data_consumed) {
  1122. int i;
  1123. for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
  1124. readl(master->regs + IBI_DATA_FIFO);
  1125. }
  1126. }
  1127. static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
  1128. {
  1129. u32 status0;
  1130. writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
  1131. for (status0 = readl(master->regs + MST_STATUS0);
  1132. !(status0 & MST_STATUS0_IBIR_EMP);
  1133. status0 = readl(master->regs + MST_STATUS0)) {
  1134. u32 ibir = readl(master->regs + IBIR);
  1135. switch (IBIR_TYPE(ibir)) {
  1136. case IBIR_TYPE_IBI:
  1137. cdns_i3c_master_handle_ibi(master, ibir);
  1138. break;
  1139. case IBIR_TYPE_HJ:
  1140. WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
  1141. queue_work(master->base.wq, &master->hj_work);
  1142. break;
  1143. case IBIR_TYPE_MR:
  1144. WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
  1145. default:
  1146. break;
  1147. }
  1148. }
  1149. }
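/*
 * Top-level interrupt handler: command-response processing runs under the
 * xferqueue lock, then IBI/Hot-Join/Mastership-Request entries are demuxed
 * from the IBIR FIFO (a Hot-Join request queues hj_work, which re-runs DAA).
 */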
  1150. static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
  1151. {
  1152. struct cdns_i3c_master *master = data;
  1153. u32 status;
  1154. status = readl(master->regs + MST_ISR);
  1155. if (!(status & readl(master->regs + MST_IMR)))
  1156. return IRQ_NONE;
  1157. spin_lock(&master->xferqueue.lock);
  1158. cdns_i3c_master_end_xfer_locked(master, status);
  1159. spin_unlock(&master->xferqueue.lock);
  1160. if (status & MST_INT_IBIR_THR)
  1161. cnds_i3c_master_demux_ibis(master);
  1162. return IRQ_HANDLED;
  1163. }
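/*
 * SIR mapping: each device's IBI configuration occupies one 16-bit half of a
 * SIR_MAP word. Enabling IBIs points the entry at the device's dynamic
 * address with the ACK bit set; disabling them points the entry back at the
 * broadcast address.
 */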
  1164. static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
  1165. {
  1166. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  1167. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  1168. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1169. unsigned long flags;
  1170. u32 sirmap;
  1171. int ret;
  1172. ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
  1173. I3C_CCC_EVENT_SIR);
  1174. if (ret)
  1175. return ret;
  1176. spin_lock_irqsave(&master->ibi.lock, flags);
  1177. sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
  1178. sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
  1179. sirmap |= SIR_MAP_DEV_CONF(data->ibi,
  1180. SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
  1181. writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
  1182. spin_unlock_irqrestore(&master->ibi.lock, flags);
  1183. return ret;
  1184. }
  1185. static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
  1186. {
  1187. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  1188. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  1189. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1190. unsigned long flags;
  1191. u32 sircfg, sirmap;
  1192. int ret;
  1193. spin_lock_irqsave(&master->ibi.lock, flags);
  1194. sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
  1195. sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
  1196. sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
  1197. SIR_MAP_DEV_DA(dev->info.dyn_addr) |
  1198. SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
  1199. SIR_MAP_DEV_ACK;
  1200. if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
  1201. sircfg |= SIR_MAP_DEV_SLOW;
  1202. sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
  1203. writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
  1204. spin_unlock_irqrestore(&master->ibi.lock, flags);
  1205. ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
  1206. I3C_CCC_EVENT_SIR);
  1207. if (ret) {
  1208. spin_lock_irqsave(&master->ibi.lock, flags);
  1209. sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
  1210. sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
  1211. sirmap |= SIR_MAP_DEV_CONF(data->ibi,
  1212. SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
  1213. writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
  1214. spin_unlock_irqrestore(&master->ibi.lock, flags);
  1215. }
  1216. return ret;
  1217. }
  1218. static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
  1219. const struct i3c_ibi_setup *req)
  1220. {
  1221. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  1222. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  1223. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1224. unsigned long flags;
  1225. unsigned int i;
  1226. data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
  1227. if (IS_ERR(data->ibi_pool))
  1228. return PTR_ERR(data->ibi_pool);
  1229. spin_lock_irqsave(&master->ibi.lock, flags);
  1230. for (i = 0; i < master->ibi.num_slots; i++) {
  1231. if (!master->ibi.slots[i]) {
  1232. data->ibi = i;
  1233. master->ibi.slots[i] = dev;
  1234. break;
  1235. }
  1236. }
  1237. spin_unlock_irqrestore(&master->ibi.lock, flags);
  1238. if (i < master->ibi.num_slots)
  1239. return 0;
  1240. i3c_generic_ibi_free_pool(data->ibi_pool);
  1241. data->ibi_pool = NULL;
  1242. return -ENOSPC;
  1243. }
  1244. static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
  1245. {
  1246. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  1247. struct cdns_i3c_master *master = to_cdns_i3c_master(m);
  1248. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1249. unsigned long flags;
  1250. spin_lock_irqsave(&master->ibi.lock, flags);
  1251. master->ibi.slots[data->ibi] = NULL;
  1252. data->ibi = -1;
  1253. spin_unlock_irqrestore(&master->ibi.lock, flags);
  1254. i3c_generic_ibi_free_pool(data->ibi_pool);
  1255. }
  1256. static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
  1257. struct i3c_ibi_slot *slot)
  1258. {
  1259. struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1260. i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
  1261. }
  1262. static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
  1263. .bus_init = cdns_i3c_master_bus_init,
  1264. .bus_cleanup = cdns_i3c_master_bus_cleanup,
  1265. .do_daa = cdns_i3c_master_do_daa,
  1266. .attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
  1267. .reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
  1268. .detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
  1269. .attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
  1270. .detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
  1271. .supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
  1272. .send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
  1273. .priv_xfers = cdns_i3c_master_priv_xfers,
  1274. .i2c_xfers = cdns_i3c_master_i2c_xfers,
  1275. .enable_ibi = cdns_i3c_master_enable_ibi,
  1276. .disable_ibi = cdns_i3c_master_disable_ibi,
  1277. .request_ibi = cdns_i3c_master_request_ibi,
  1278. .free_ibi = cdns_i3c_master_free_ibi,
  1279. .recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
  1280. };
  1281. static void cdns_i3c_master_hj(struct work_struct *work)
  1282. {
  1283. struct cdns_i3c_master *master = container_of(work,
  1284. struct cdns_i3c_master,
  1285. hj_work);
  1286. i3c_master_do_daa(&master->base);
  1287. }
  1288. static struct cdns_i3c_data cdns_i3c_devdata = {
  1289. .thd_delay_ns = 10,
  1290. };
  1291. static const struct of_device_id cdns_i3c_master_of_ids[] = {
  1292. { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
  1293. { /* sentinel */ },
  1294. };
  1295. static int cdns_i3c_master_probe(struct platform_device *pdev)
  1296. {
  1297. struct cdns_i3c_master *master;
  1298. int ret, irq;
  1299. u32 val;
  1300. master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
  1301. if (!master)
  1302. return -ENOMEM;
  1303. master->devdata = of_device_get_match_data(&pdev->dev);
  1304. if (!master->devdata)
  1305. return -EINVAL;
  1306. master->regs = devm_platform_ioremap_resource(pdev, 0);
  1307. if (IS_ERR(master->regs))
  1308. return PTR_ERR(master->regs);
  1309. master->pclk = devm_clk_get(&pdev->dev, "pclk");
  1310. if (IS_ERR(master->pclk))
  1311. return PTR_ERR(master->pclk);
  1312. master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
  1313. if (IS_ERR(master->sysclk))
  1314. return PTR_ERR(master->sysclk);
  1315. irq = platform_get_irq(pdev, 0);
  1316. if (irq < 0)
  1317. return irq;
  1318. ret = clk_prepare_enable(master->pclk);
  1319. if (ret)
  1320. return ret;
  1321. ret = clk_prepare_enable(master->sysclk);
  1322. if (ret)
  1323. goto err_disable_pclk;
  1324. if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
  1325. ret = -EINVAL;
  1326. goto err_disable_sysclk;
  1327. }
  1328. spin_lock_init(&master->xferqueue.lock);
  1329. INIT_LIST_HEAD(&master->xferqueue.list);
  1330. INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
  1331. writel(0xffffffff, master->regs + MST_IDR);
  1332. writel(0xffffffff, master->regs + SLV_IDR);
  1333. ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
  1334. dev_name(&pdev->dev), master);
  1335. if (ret)
  1336. goto err_disable_sysclk;
  1337. platform_set_drvdata(pdev, master);
  1338. val = readl(master->regs + CONF_STATUS0);
  1339. /* Device ID0 is reserved to describe this master. */
  1340. master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
  1341. master->free_rr_slots = GENMASK(master->maxdevs, 1);
  1342. master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
  1343. master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
  1344. val = readl(master->regs + CONF_STATUS1);
  1345. master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
  1346. master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
  1347. master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
  1348. spin_lock_init(&master->ibi.lock);
  1349. master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
  1350. master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
  1351. sizeof(*master->ibi.slots),
  1352. GFP_KERNEL);
  1353. if (!master->ibi.slots) {
  1354. ret = -ENOMEM;
  1355. goto err_disable_sysclk;
  1356. }
  1357. writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
  1358. writel(MST_INT_IBIR_THR, master->regs + MST_IER);
  1359. writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
  1360. ret = i3c_master_register(&master->base, &pdev->dev,
  1361. &cdns_i3c_master_ops, false);
  1362. if (ret)
  1363. goto err_disable_sysclk;
  1364. return 0;
  1365. err_disable_sysclk:
  1366. clk_disable_unprepare(master->sysclk);
  1367. err_disable_pclk:
  1368. clk_disable_unprepare(master->pclk);
  1369. return ret;
  1370. }
  1371. static int cdns_i3c_master_remove(struct platform_device *pdev)
  1372. {
  1373. struct cdns_i3c_master *master = platform_get_drvdata(pdev);
  1374. int ret;
  1375. ret = i3c_master_unregister(&master->base);
  1376. if (ret)
  1377. return ret;
  1378. clk_disable_unprepare(master->sysclk);
  1379. clk_disable_unprepare(master->pclk);
  1380. return 0;
  1381. }
  1382. static struct platform_driver cdns_i3c_master = {
  1383. .probe = cdns_i3c_master_probe,
  1384. .remove = cdns_i3c_master_remove,
  1385. .driver = {
  1386. .name = "cdns-i3c-master",
  1387. .of_match_table = cdns_i3c_master_of_ids,
  1388. },
  1389. };
  1390. module_platform_driver(cdns_i3c_master);
  1391. MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
  1392. MODULE_DESCRIPTION("Cadence I3C master driver");
  1393. MODULE_LICENSE("GPL v2");
  1394. MODULE_ALIAS("platform:cdns-i3c-master");