clk_zynq.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Weidmüller Interface GmbH & Co. KG
 * Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
 *
 * Copyright (C) 2013 Soren Brinkmann <soren.brinkmann@xilinx.com>
 * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <log.h>
#include <asm/global_data.h>
#include <dm/device_compat.h>
#include <dm/lists.h>
#include <errno.h>
#include <asm/io.h>
#include <asm/arch/clk.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>

/* Register bitfield defines */
#define PLLCTRL_FBDIV_MASK      0x7f000
#define PLLCTRL_FBDIV_SHIFT     12
#define PLLCTRL_BPFORCE_MASK    (1 << 4)
#define PLLCTRL_PWRDWN_MASK     2
#define PLLCTRL_PWRDWN_SHIFT    1
#define PLLCTRL_RESET_MASK      1
#define PLLCTRL_RESET_SHIFT     0

#define ZYNQ_CLK_MAXDIV         0x3f
#define CLK_CTRL_DIV1_SHIFT     20
#define CLK_CTRL_DIV1_MASK      (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV1_SHIFT)
#define CLK_CTRL_DIV0_SHIFT     8
#define CLK_CTRL_DIV0_MASK      (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV0_SHIFT)
#define CLK_CTRL_SRCSEL_SHIFT   4
#define CLK_CTRL_SRCSEL_MASK    (0x3 << CLK_CTRL_SRCSEL_SHIFT)
#define CLK_CTRL_DIV2X_SHIFT    26
#define CLK_CTRL_DIV2X_MASK     (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV2X_SHIFT)
#define CLK_CTRL_DIV3X_SHIFT    20
#define CLK_CTRL_DIV3X_MASK     (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV3X_SHIFT)

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SPL_BUILD
enum zynq_clk_rclk {mio_clk, emio_clk};
#endif

struct zynq_clk_priv {
        ulong ps_clk_freq;
#ifndef CONFIG_SPL_BUILD
        struct clk gem_emio_clk[2];
#endif
};
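
/* Map a clock ID to the address of its control register in the SLCR. */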
static void *zynq_clk_get_register(enum zynq_clk id)
{
        switch (id) {
        case armpll_clk:
                return &slcr_base->arm_pll_ctrl;
        case ddrpll_clk:
                return &slcr_base->ddr_pll_ctrl;
        case iopll_clk:
                return &slcr_base->io_pll_ctrl;
        case lqspi_clk:
                return &slcr_base->lqspi_clk_ctrl;
        case smc_clk:
                return &slcr_base->smc_clk_ctrl;
        case pcap_clk:
                return &slcr_base->pcap_clk_ctrl;
        case sdio0_clk ... sdio1_clk:
                return &slcr_base->sdio_clk_ctrl;
        case uart0_clk ... uart1_clk:
                return &slcr_base->uart_clk_ctrl;
        case spi0_clk ... spi1_clk:
                return &slcr_base->spi_clk_ctrl;
#ifndef CONFIG_SPL_BUILD
        case dci_clk:
                return &slcr_base->dci_clk_ctrl;
        case gem0_clk:
                return &slcr_base->gem0_clk_ctrl;
        case gem1_clk:
                return &slcr_base->gem1_clk_ctrl;
        case fclk0_clk:
                return &slcr_base->fpga0_clk_ctrl;
        case fclk1_clk:
                return &slcr_base->fpga1_clk_ctrl;
        case fclk2_clk:
                return &slcr_base->fpga2_clk_ctrl;
        case fclk3_clk:
                return &slcr_base->fpga3_clk_ctrl;
        case can0_clk ... can1_clk:
                return &slcr_base->can_clk_ctrl;
        case dbg_trc_clk ... dbg_apb_clk:
                /* fall through */
#endif
        default:
                return &slcr_base->dbg_clk_ctrl;
        }
}
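
/* Decode the SRCSEL field of the ARM clock control register into a PLL ID. */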
static enum zynq_clk zynq_clk_get_cpu_pll(u32 clk_ctrl)
{
        u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

        switch (srcsel) {
        case 2:
                return ddrpll_clk;
        case 3:
                return iopll_clk;
        case 0 ... 1:
        default:
                return armpll_clk;
        }
}
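
/* Decode the SRCSEL field of a peripheral clock register into a PLL ID. */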
static enum zynq_clk zynq_clk_get_peripheral_pll(u32 clk_ctrl)
{
        u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

        switch (srcsel) {
        case 2:
                return armpll_clk;
        case 3:
                return ddrpll_clk;
        case 0 ... 1:
        default:
                return iopll_clk;
        }
}
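
/*
 * Return the PLL output rate: PS_CLK * FBDIV, 0 if the PLL is in reset or
 * powered down, and PS_CLK itself when the bypass-force bit is set.
 */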
static ulong zynq_clk_get_pll_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
        u32 clk_ctrl, reset, pwrdwn, mul, bypass;

        clk_ctrl = readl(zynq_clk_get_register(id));

        reset = (clk_ctrl & PLLCTRL_RESET_MASK) >> PLLCTRL_RESET_SHIFT;
        pwrdwn = (clk_ctrl & PLLCTRL_PWRDWN_MASK) >> PLLCTRL_PWRDWN_SHIFT;
        if (reset || pwrdwn)
                return 0;

        bypass = clk_ctrl & PLLCTRL_BPFORCE_MASK;
        if (bypass)
                mul = 1;
        else
                mul = (clk_ctrl & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT;

        return priv->ps_clk_freq * mul;
}

#ifndef CONFIG_SPL_BUILD
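/* Report whether a GEM RX clock is sourced from MIO or EMIO pins. */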
static enum zynq_clk_rclk zynq_clk_get_gem_rclk(enum zynq_clk id)
{
        u32 clk_ctrl, srcsel;

        if (id == gem0_clk)
                clk_ctrl = readl(&slcr_base->gem0_rclk_ctrl);
        else
                clk_ctrl = readl(&slcr_base->gem1_rclk_ctrl);

        srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
        if (srcsel)
                return emio_clk;
        else
                return mio_clk;
}
#endif
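
/*
 * Derive one of the CPU clocks (6x4x, 3x2x, 2x, 1x) from the selected PLL,
 * the ARM clock divider and the CLK_621_TRUE setting.
 */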
static ulong zynq_clk_get_cpu_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
        u32 clk_621, clk_ctrl, div;
        enum zynq_clk pll;

        clk_ctrl = readl(&slcr_base->arm_clk_ctrl);

        div = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;

        switch (id) {
        case cpu_1x_clk:
                div *= 2;
                /* fall through */
        case cpu_2x_clk:
                clk_621 = readl(&slcr_base->clk_621_true) & 1;
                div *= 2 + clk_621;
                break;
        case cpu_3or2x_clk:
                div *= 2;
                /* fall through */
        case cpu_6or4x_clk:
                break;
        default:
                return 0;
        }

        pll = zynq_clk_get_cpu_pll(clk_ctrl);

        return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, pll), div);
}

#ifndef CONFIG_SPL_BUILD
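/* Return the DDR 2x clock rate: DDR PLL divided by the DIV2X field. */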
static ulong zynq_clk_get_ddr2x_rate(struct zynq_clk_priv *priv)
{
        u32 clk_ctrl, div;

        clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);

        div = (clk_ctrl & CLK_CTRL_DIV2X_MASK) >> CLK_CTRL_DIV2X_SHIFT;

        return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
}
#endif
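
/* Return the DDR 3x clock rate: DDR PLL divided by the DIV3X field. */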
static ulong zynq_clk_get_ddr3x_rate(struct zynq_clk_priv *priv)
{
        u32 clk_ctrl, div;

        clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);

        div = (clk_ctrl & CLK_CTRL_DIV3X_MASK) >> CLK_CTRL_DIV3X_SHIFT;

        return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
}

#ifndef CONFIG_SPL_BUILD
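/* Return the DCI clock rate: DDR PLL divided by both DCI divisor fields. */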
static ulong zynq_clk_get_dci_rate(struct zynq_clk_priv *priv)
{
        u32 clk_ctrl, div0, div1;

        clk_ctrl = readl(&slcr_base->dci_clk_ctrl);

        div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
        div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;

        return DIV_ROUND_CLOSEST(DIV_ROUND_CLOSEST(
                zynq_clk_get_pll_rate(priv, ddrpll_clk), div0), div1);
}
#endif
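
/*
 * Return a peripheral clock rate from its source PLL and divisor fields;
 * clocks with two cascaded divisors pass two_divs = true.
 */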
static ulong zynq_clk_get_peripheral_rate(struct zynq_clk_priv *priv,
                                          enum zynq_clk id, bool two_divs)
{
        enum zynq_clk pll;
        u32 clk_ctrl, div0;
        u32 div1 = 1;

        clk_ctrl = readl(zynq_clk_get_register(id));

        div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
        if (!div0)
                div0 = 1;

#ifndef CONFIG_SPL_BUILD
        if (two_divs) {
                div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
                if (!div1)
                        div1 = 1;
        }
#endif

        pll = zynq_clk_get_peripheral_pll(clk_ctrl);

        return
                DIV_ROUND_CLOSEST(
                        DIV_ROUND_CLOSEST(
                                zynq_clk_get_pll_rate(priv, pll), div0),
                        div1);
}

#ifndef CONFIG_SPL_BUILD
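/*
 * GEM clock rate: read the peripheral divisors for the MIO case, otherwise
 * query the external EMIO clock provider registered at probe time.
 */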
static ulong zynq_clk_get_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
        struct clk *parent;

        if (zynq_clk_get_gem_rclk(id) == mio_clk)
                return zynq_clk_get_peripheral_rate(priv, id, true);

        parent = &priv->gem_emio_clk[id - gem0_clk];
        if (parent->dev)
                return clk_get_rate(parent);

        debug("%s: gem%d emio rx clock source unknown\n", __func__,
              id - gem0_clk);

        return -ENOSYS;
}
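
/*
 * Exhaustively search the two cascaded 6-bit divisors for the pair that
 * brings pll_rate closest to the requested rate (for example, a 1000 MHz
 * PLL reaches 25 MHz exactly with divisors 2 and 20). Note that div1 is
 * only scanned up to ZYNQ_CLK_MAXDIV / 2.
 */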
static unsigned long zynq_clk_calc_peripheral_two_divs(ulong rate,
                                                       ulong pll_rate,
                                                       u32 *div0, u32 *div1)
{
        long new_err, best_err = (long)(~0UL >> 1);
        ulong new_rate, best_rate = 0;
        u32 d0, d1;

        for (d0 = 1; d0 <= ZYNQ_CLK_MAXDIV; d0++) {
                for (d1 = 1; d1 <= ZYNQ_CLK_MAXDIV >> 1; d1++) {
                        new_rate = DIV_ROUND_CLOSEST(
                                DIV_ROUND_CLOSEST(pll_rate, d0), d1);
                        new_err = abs(new_rate - rate);

                        if (new_err < best_err) {
                                *div0 = d0;
                                *div1 = d1;
                                best_err = new_err;
                                best_rate = new_rate;
                        }
                }
        }

        return best_rate;
}
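
/*
 * Program the divisor field(s) of a peripheral clock register so the output
 * is as close as possible to the requested rate. The SLCR is unlocked only
 * around the register write.
 */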
static ulong zynq_clk_set_peripheral_rate(struct zynq_clk_priv *priv,
                                          enum zynq_clk id, ulong rate,
                                          bool two_divs)
{
        enum zynq_clk pll;
        u32 clk_ctrl, div0 = 0, div1 = 0;
        ulong pll_rate, new_rate;
        u32 *reg;

        reg = zynq_clk_get_register(id);
        clk_ctrl = readl(reg);

        pll = zynq_clk_get_peripheral_pll(clk_ctrl);
        pll_rate = zynq_clk_get_pll_rate(priv, pll);
        clk_ctrl &= ~CLK_CTRL_DIV0_MASK;
        if (two_divs) {
                clk_ctrl &= ~CLK_CTRL_DIV1_MASK;
                new_rate = zynq_clk_calc_peripheral_two_divs(rate, pll_rate,
                                                             &div0, &div1);
                clk_ctrl |= div1 << CLK_CTRL_DIV1_SHIFT;
        } else {
                div0 = DIV_ROUND_CLOSEST(pll_rate, rate);
                if (div0 > ZYNQ_CLK_MAXDIV)
                        div0 = ZYNQ_CLK_MAXDIV;
                new_rate = DIV_ROUND_CLOSEST(rate, div0);
        }
        clk_ctrl |= div0 << CLK_CTRL_DIV0_SHIFT;

        zynq_slcr_unlock();
        writel(clk_ctrl, reg);
        zynq_slcr_lock();

        return new_rate;
}
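
/*
 * Set a GEM clock rate: program the peripheral divisors for the MIO case,
 * otherwise forward the request to the EMIO clock provider.
 */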
static ulong zynq_clk_set_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id,
                                   ulong rate)
{
        struct clk *parent;

        if (zynq_clk_get_gem_rclk(id) == mio_clk)
                return zynq_clk_set_peripheral_rate(priv, id, rate, true);

        parent = &priv->gem_emio_clk[id - gem0_clk];
        if (parent->dev)
                return clk_set_rate(parent, rate);

        debug("%s: gem%d emio rx clock source unknown\n", __func__,
              id - gem0_clk);

        return -ENOSYS;
}
#endif

#ifndef CONFIG_SPL_BUILD
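/* clk_ops .get_rate callback for the full (non-SPL) build. */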
static ulong zynq_clk_get_rate(struct clk *clk)
{
        struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
        enum zynq_clk id = clk->id;
        bool two_divs = false;

        switch (id) {
        case armpll_clk ... iopll_clk:
                return zynq_clk_get_pll_rate(priv, id);
        case cpu_6or4x_clk ... cpu_1x_clk:
                return zynq_clk_get_cpu_rate(priv, id);
        case ddr2x_clk:
                return zynq_clk_get_ddr2x_rate(priv);
        case ddr3x_clk:
                return zynq_clk_get_ddr3x_rate(priv);
        case dci_clk:
                return zynq_clk_get_dci_rate(priv);
        case gem0_clk ... gem1_clk:
                return zynq_clk_get_gem_rate(priv, id);
        case fclk0_clk ... can1_clk:
                two_divs = true;
                /* fall through */
        case dbg_trc_clk ... dbg_apb_clk:
        case lqspi_clk ... pcap_clk:
        case sdio0_clk ... spi1_clk:
                return zynq_clk_get_peripheral_rate(priv, id, two_divs);
        case dma_clk:
                return zynq_clk_get_cpu_rate(priv, cpu_2x_clk);
        case usb0_aper_clk ... swdt_clk:
                return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
        default:
                return -ENXIO;
        }
}
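
/* clk_ops .set_rate callback; only peripheral and GEM clocks are settable. */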
static ulong zynq_clk_set_rate(struct clk *clk, ulong rate)
{
        struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
        enum zynq_clk id = clk->id;
        bool two_divs = false;

        switch (id) {
        case gem0_clk ... gem1_clk:
                return zynq_clk_set_gem_rate(priv, id, rate);
        case fclk0_clk ... can1_clk:
                two_divs = true;
                /* fall through */
        case lqspi_clk ... pcap_clk:
        case sdio0_clk ... spi1_clk:
        case dbg_trc_clk ... dbg_apb_clk:
                return zynq_clk_set_peripheral_rate(priv, id, rate, two_divs);
        default:
                return -ENXIO;
        }
}
#else
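/* Reduced clk_ops .get_rate callback for the SPL build. */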
static ulong zynq_clk_get_rate(struct clk *clk)
{
        struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
        enum zynq_clk id = clk->id;

        switch (id) {
        case cpu_6or4x_clk ... cpu_1x_clk:
                return zynq_clk_get_cpu_rate(priv, id);
        case ddr3x_clk:
                return zynq_clk_get_ddr3x_rate(priv);
        case lqspi_clk ... pcap_clk:
        case sdio0_clk ... spi1_clk:
                return zynq_clk_get_peripheral_rate(priv, id, 0);
        case i2c0_aper_clk ... i2c1_aper_clk:
                return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
        default:
                return -ENXIO;
        }
}
#endif

static int dummy_enable(struct clk *clk)
{
        /*
         * All clocks are enabled by default after power-up, which is the
         * only case supported so far, so there is nothing to do here yet.
         */
        return 0;
}

static struct clk_ops zynq_clk_ops = {
        .get_rate = zynq_clk_get_rate,
#ifndef CONFIG_SPL_BUILD
        .set_rate = zynq_clk_set_rate,
#endif
        .enable = dummy_enable,
};
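
/*
 * Read the PS reference clock frequency from the device tree and look up the
 * optional gem0/gem1 EMIO clock inputs.
 */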
static int zynq_clk_probe(struct udevice *dev)
{
        struct zynq_clk_priv *priv = dev_get_priv(dev);
#ifndef CONFIG_SPL_BUILD
        unsigned int i;
        char name[16];
        int ret;

        for (i = 0; i < 2; i++) {
                sprintf(name, "gem%d_emio_clk", i);
                ret = clk_get_by_name(dev, name, &priv->gem_emio_clk[i]);
                if (ret < 0 && ret != -ENODATA) {
                        dev_err(dev, "failed to get %s clock\n", name);
                        return ret;
                }
        }
#endif

        priv->ps_clk_freq = fdtdec_get_uint(gd->fdt_blob, dev_of_offset(dev),
                                            "ps-clk-frequency", 33333333UL);

        return 0;
}

static const struct udevice_id zynq_clk_ids[] = {
        { .compatible = "xlnx,ps7-clkc"},
        {}
};

U_BOOT_DRIVER(zynq_clk) = {
        .name = "zynq_clk",
        .id = UCLASS_CLK,
        .of_match = zynq_clk_ids,
        .ops = &zynq_clk_ops,
        .priv_auto = sizeof(struct zynq_clk_priv),
        .probe = zynq_clk_probe,
};