qcom-ebi2.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm External Bus Interface 2 (EBI2) driver
 * an older version of the Qualcomm Parallel Interface Controller (QPIC)
 *
 * Copyright (C) 2016 Linaro Ltd.
 *
 * Author: Linus Walleij <linus.walleij@linaro.org>
 *
 * See the device tree bindings for this block for more details on the
 * hardware.
 */
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>

/*
 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 */
#define EBI2_CS0_ENABLE_MASK	(BIT(0) | BIT(1))
#define EBI2_CS1_ENABLE_MASK	(BIT(2) | BIT(3))
#define EBI2_CS2_ENABLE_MASK	BIT(4)
#define EBI2_CS3_ENABLE_MASK	BIT(5)
#define EBI2_CS4_ENABLE_MASK	(BIT(6) | BIT(7))
#define EBI2_CS5_ENABLE_MASK	(BIT(8) | BIT(9))
#define EBI2_CSN_MASK		GENMASK(9, 0)

#define EBI2_XMEM_CFG		0x0000 /* Power management etc */

/*
 * SLOW CSn CFG
 *
 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
 *             memory continues to drive the data bus after OE is de-asserted.
 *             Inserted when reading one CS and switching to another CS or read
 *             followed by write on the same CS. Valid values 0 thru 15.
 * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
 *             every write minimum 1. The data out is driven from the time WE is
 *             asserted until CS is asserted. With a hold of 1, the CS stays
 *             active for 1 extra cycle etc. Valid values 0 thru 15.
 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
 *             write to a page or burst memory
 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the first
 *             read to a page or burst memory
 * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1 cycle
 *             so 1 thru 16 cycles.
 * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1 cycle
 *             so 1 thru 16 cycles.
 */
#define EBI2_XMEM_CS0_SLOW_CFG		0x0008
#define EBI2_XMEM_CS1_SLOW_CFG		0x000C
#define EBI2_XMEM_CS2_SLOW_CFG		0x0010
#define EBI2_XMEM_CS3_SLOW_CFG		0x0014
#define EBI2_XMEM_CS4_SLOW_CFG		0x0018
#define EBI2_XMEM_CS5_SLOW_CFG		0x001C

#define EBI2_XMEM_RECOVERY_SHIFT	28
#define EBI2_XMEM_WR_HOLD_SHIFT		24
#define EBI2_XMEM_WR_DELTA_SHIFT	16
#define EBI2_XMEM_RD_DELTA_SHIFT	8
#define EBI2_XMEM_WR_WAIT_SHIFT		4
#define EBI2_XMEM_RD_WAIT_SHIFT		0

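/*
 * A purely illustrative composition (the cycle counts below are made up,
 * not taken from any board): a chipselect given 2 recovery cycles, 3 write
 * hold cycles, 1 write wait state and 2 read wait states would end up with
 * a SLOW CFG word of
 *
 *	(2 << EBI2_XMEM_RECOVERY_SHIFT) | (3 << EBI2_XMEM_WR_HOLD_SHIFT) |
 *	(1 << EBI2_XMEM_WR_WAIT_SHIFT)  | (2 << EBI2_XMEM_RD_WAIT_SHIFT)
 *	= 0x23000012
 *
 * which is the value qcom_ebi2_setup_chipselect() below builds from the
 * corresponding qcom,xmem-* device tree properties.
 */
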
/*
 * FAST CSn CFG
 * Bits 31-28: ?
 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 *             transfer. For a single read transfer this will be the time
 *             from CS assertion to OE assertion.
 * Bits 23-18: ?
 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 *             assertion, with respect to the cycle where ADV is asserted.
 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 * Bit 5:      ADDR_HOLD_ENA, the address is held for an extra cycle to meet
 *             hold time requirements with ADV assertion.
 *
 * The manual mentions "write precharge cycles" and "precharge cycles".
 * We have not been able to figure out which bit fields these correspond to
 * in the hardware, or what valid values exist. The current hypothesis is that
 * this is something just used on the FAST chip selects. There is also a "byte
 * device enable" flag somewhere for 8bit memories.
 */
#define EBI2_XMEM_CS0_FAST_CFG		0x0028
#define EBI2_XMEM_CS1_FAST_CFG		0x002C
#define EBI2_XMEM_CS2_FAST_CFG		0x0030
#define EBI2_XMEM_CS3_FAST_CFG		0x0034
#define EBI2_XMEM_CS4_FAST_CFG		0x0038
#define EBI2_XMEM_CS5_FAST_CFG		0x003C

#define EBI2_XMEM_RD_HOLD_SHIFT		24
#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5

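/*
 * As with the SLOW register, the FAST CFG word is composed from these
 * shifts; for example (illustrative values only) a read hold of 4 cycles
 * with address hold enabled would give
 *
 *	(4 << EBI2_XMEM_RD_HOLD_SHIFT) | BIT(EBI2_XMEM_ADDR_HOLD_ENA_SHIFT)
 *	= 0x04000020
 */
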
/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEMC slow CS config
 * @fast_cfg: offset to XMEMC fast CS config
 */
struct cs_data {
	u32 enable_mask;
	u16 slow_cfg;
	u16 fast_cfg;
};

static const struct cs_data cs_info[] = {
	{
		/* CS0 */
		.enable_mask = EBI2_CS0_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
	},
	{
		/* CS1 */
		.enable_mask = EBI2_CS1_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
	},
	{
		/* CS2 */
		.enable_mask = EBI2_CS2_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
	},
	{
		/* CS3 */
		.enable_mask = EBI2_CS3_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
	},
	{
		/* CS4 */
		.enable_mask = EBI2_CS4_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
	},
	{
		/* CS5 */
		.enable_mask = EBI2_CS5_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
	},
};

/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property
 * @slowreg: true if this property is in the SLOW CS config register,
 *           else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 *         property
 */
struct ebi2_xmem_prop {
	const char *prop;
	u32 max;
	bool slowreg;
	u16 shift;
};

static const struct ebi2_xmem_prop xmem_props[] = {
	{
		.prop = "qcom,xmem-recovery-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-hold-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-address-hold-enable",
		.max = 1, /* boolean prop */
		.slowreg = false,
		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
	},
	{
		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
		.max = 3,
		.slowreg = false,
		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-hold-cycles",
		.max = 15,
		.slowreg = false,
		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
	},
};

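/**
 * qcom_ebi2_setup_chipselect() - enable and configure one chipselect
 * @np: device tree node of the peripheral using this chipselect
 * @dev: the EBI2 device
 * @ebi2_base: remapped EBI2 chipselect enable register
 * @ebi2_xmem: remapped XMEMC configuration register block
 * @csindex: chipselect index, 0 thru 5
 *
 * Enables the chipselect in the EBI2 configuration register, then walks
 * the qcom,xmem-* properties in @np and folds them into the SLOW and FAST
 * XMEMC config registers for that chipselect.
 */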
static void qcom_ebi2_setup_chipselect(struct device_node *np,
				       struct device *dev,
				       void __iomem *ebi2_base,
				       void __iomem *ebi2_xmem,
				       u32 csindex)
{
	const struct cs_data *csd;
	u32 slowcfg, fastcfg;
	u32 val;
	int ret;
	int i;

	csd = &cs_info[csindex];
	val = readl(ebi2_base);
	val |= csd->enable_mask;
	writel(val, ebi2_base);
	dev_dbg(dev, "enabled CS%u\n", csindex);

	/* Next set up the XMEMC */
	slowcfg = 0;
	fastcfg = 0;

	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
		const struct ebi2_xmem_prop *xp = &xmem_props[i];

		/* All are regular u32 values */
		ret = of_property_read_u32(np, xp->prop, &val);
		if (ret) {
			dev_dbg(dev, "could not read %s for CS%u\n",
				xp->prop, csindex);
			continue;
		}

		/* First check boolean props */
		if (xp->max == 1 && val) {
			if (xp->slowreg)
				slowcfg |= BIT(xp->shift);
			else
				fastcfg |= BIT(xp->shift);
			dev_dbg(dev, "set %s flag\n", xp->prop);
			continue;
		}

		/* We're dealing with a u32 */
		if (val > xp->max) {
			dev_err(dev,
				"too high value for %s: %u, capped at %u\n",
				xp->prop, val, xp->max);
			val = xp->max;
		}
		if (xp->slowreg)
			slowcfg |= (val << xp->shift);
		else
			fastcfg |= (val << xp->shift);
		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
	}

	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
		 csindex, slowcfg, fastcfg);

	if (slowcfg)
		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
	if (fastcfg)
		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
}

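/*
 * The probe routine below walks the available child nodes of the EBI2 node;
 * each child picks its chipselect with the first cell of "reg" and may carry
 * any of the qcom,xmem-* timing properties parsed above. A sketch of such a
 * child node (device, addresses and timing values are hypothetical, see the
 * device tree bindings for real examples):
 *
 *	ethernet@2,0 {
 *		compatible = "smsc,lan9221";
 *		reg = <2 0x0 0x100>;
 *		qcom,xmem-recovery-cycles = <2>;
 *		qcom,xmem-write-hold-cycles = <3>;
 *		qcom,xmem-read-wait-cycles = <2>;
 *	};
 */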
static int qcom_ebi2_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *child;
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *ebi2_base;
	void __iomem *ebi2_xmem;
	struct clk *ebi2xclk;
	struct clk *ebi2clk;
	bool have_children = false;
	u32 val;
	int ret;

	ebi2xclk = devm_clk_get(dev, "ebi2x");
	if (IS_ERR(ebi2xclk))
		return PTR_ERR(ebi2xclk);

	ret = clk_prepare_enable(ebi2xclk);
	if (ret) {
		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
		return ret;
	}

	ebi2clk = devm_clk_get(dev, "ebi2");
	if (IS_ERR(ebi2clk)) {
		ret = PTR_ERR(ebi2clk);
		goto err_disable_2x_clk;
	}

	ret = clk_prepare_enable(ebi2clk);
	if (ret) {
		dev_err(dev, "could not enable EBI2 clk (%d)\n", ret);
		goto err_disable_2x_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ebi2_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebi2_base)) {
		ret = PTR_ERR(ebi2_base);
		goto err_disable_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ebi2_xmem = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebi2_xmem)) {
		ret = PTR_ERR(ebi2_xmem);
		goto err_disable_clk;
	}

	/* Allegedly this turns the power save mode off */
	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);

	/* Disable all chipselects */
	val = readl(ebi2_base);
	val &= ~EBI2_CSN_MASK;
	writel(val, ebi2_base);

	/* Walk over the child nodes and see what chipselects we use */
	for_each_available_child_of_node(np, child) {
		u32 csindex;

		/* Figure out the chipselect */
		ret = of_property_read_u32(child, "reg", &csindex);
		if (ret) {
			of_node_put(child);
			goto err_disable_clk;
		}

		if (csindex > 5) {
			dev_err(dev,
				"invalid chipselect %u, we only support 0-5\n",
				csindex);
			continue;
		}

		qcom_ebi2_setup_chipselect(child,
					   dev,
					   ebi2_base,
					   ebi2_xmem,
					   csindex);
		/* We have at least one child */
		have_children = true;
	}

	if (have_children)
		return of_platform_default_populate(np, NULL, dev);

	return 0;

err_disable_clk:
	clk_disable_unprepare(ebi2clk);
err_disable_2x_clk:
	clk_disable_unprepare(ebi2xclk);

	return ret;
}

static const struct of_device_id qcom_ebi2_of_match[] = {
	{ .compatible = "qcom,msm8660-ebi2", },
	{ .compatible = "qcom,apq8060-ebi2", },
	{ }
};
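/*
 * Export the OF match data so that, when built as a module, the driver can
 * be autoloaded from matching device tree nodes.
 */
MODULE_DEVICE_TABLE(of, qcom_ebi2_of_match);
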
static struct platform_driver qcom_ebi2_driver = {
	.probe = qcom_ebi2_probe,
	.driver = {
		.name = "qcom-ebi2",
		.of_match_table = qcom_ebi2_of_match,
	},
};
module_platform_driver(qcom_ebi2_driver);

MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
MODULE_LICENSE("GPL");