clk_zynq.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Weidmüller Interface GmbH & Co. KG
 * Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
 *
 * Copyright (C) 2013 Soren Brinkmann <soren.brinkmann@xilinx.com>
 * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dm/lists.h>
#include <errno.h>
#include <asm/io.h>
#include <asm/arch/clk.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>

/* Register bitfield defines */
#define PLLCTRL_FBDIV_MASK 0x7f000
#define PLLCTRL_FBDIV_SHIFT 12
#define PLLCTRL_BPFORCE_MASK (1 << 4)
#define PLLCTRL_PWRDWN_MASK 2
#define PLLCTRL_PWRDWN_SHIFT 1
#define PLLCTRL_RESET_MASK 1
#define PLLCTRL_RESET_SHIFT 0

#define ZYNQ_CLK_MAXDIV 0x3f
#define CLK_CTRL_DIV1_SHIFT 20
#define CLK_CTRL_DIV1_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV1_SHIFT)
#define CLK_CTRL_DIV0_SHIFT 8
#define CLK_CTRL_DIV0_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV0_SHIFT)
#define CLK_CTRL_SRCSEL_SHIFT 4
#define CLK_CTRL_SRCSEL_MASK (0x3 << CLK_CTRL_SRCSEL_SHIFT)
#define CLK_CTRL_DIV2X_SHIFT 26
#define CLK_CTRL_DIV2X_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV2X_SHIFT)
#define CLK_CTRL_DIV3X_SHIFT 20
#define CLK_CTRL_DIV3X_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV3X_SHIFT)

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SPL_BUILD
enum zynq_clk_rclk {mio_clk, emio_clk};
#endif

struct zynq_clk_priv {
        ulong ps_clk_freq;
#ifndef CONFIG_SPL_BUILD
        struct clk gem_emio_clk[2];
#endif
};
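
/*
 * Map a clock ID to the SLCR control register that configures it. IDs
 * without a dedicated entry here (including the debug clocks) resolve
 * to the debug clock control register.
 */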
static void *zynq_clk_get_register(enum zynq_clk id)
{
        switch (id) {
        case armpll_clk:
                return &slcr_base->arm_pll_ctrl;
        case ddrpll_clk:
                return &slcr_base->ddr_pll_ctrl;
        case iopll_clk:
                return &slcr_base->io_pll_ctrl;
        case lqspi_clk:
                return &slcr_base->lqspi_clk_ctrl;
        case smc_clk:
                return &slcr_base->smc_clk_ctrl;
        case pcap_clk:
                return &slcr_base->pcap_clk_ctrl;
        case sdio0_clk ... sdio1_clk:
                return &slcr_base->sdio_clk_ctrl;
        case uart0_clk ... uart1_clk:
                return &slcr_base->uart_clk_ctrl;
        case spi0_clk ... spi1_clk:
                return &slcr_base->spi_clk_ctrl;
#ifndef CONFIG_SPL_BUILD
        case dci_clk:
                return &slcr_base->dci_clk_ctrl;
        case gem0_clk:
                return &slcr_base->gem0_clk_ctrl;
        case gem1_clk:
                return &slcr_base->gem1_clk_ctrl;
        case fclk0_clk:
                return &slcr_base->fpga0_clk_ctrl;
        case fclk1_clk:
                return &slcr_base->fpga1_clk_ctrl;
        case fclk2_clk:
                return &slcr_base->fpga2_clk_ctrl;
        case fclk3_clk:
                return &slcr_base->fpga3_clk_ctrl;
        case can0_clk ... can1_clk:
                return &slcr_base->can_clk_ctrl;
        case dbg_trc_clk ... dbg_apb_clk:
                /* fall through */
#endif
        default:
                return &slcr_base->dbg_clk_ctrl;
        }
}
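
/*
 * Decode the SRCSEL field of the CPU clock control word into the
 * parent PLL: ARM PLL unless the DDR or IO PLL is explicitly selected.
 */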
static enum zynq_clk zynq_clk_get_cpu_pll(u32 clk_ctrl)
{
        u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

        switch (srcsel) {
        case 2:
                return ddrpll_clk;
        case 3:
                return iopll_clk;
        case 0 ... 1:
        default:
                return armpll_clk;
        }
}
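
/*
 * Decode the SRCSEL field of a peripheral clock control word into the
 * parent PLL: IO PLL unless the ARM or DDR PLL is explicitly selected.
 */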
static enum zynq_clk zynq_clk_get_peripheral_pll(u32 clk_ctrl)
{
        u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

        switch (srcsel) {
        case 2:
                return armpll_clk;
        case 3:
                return ddrpll_clk;
        case 0 ... 1:
        default:
                return iopll_clk;
        }
}
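
/*
 * PLL output rate is PS_CLK multiplied by the FBDIV feedback divider.
 * A PLL held in reset or powered down reports 0; a bypassed PLL passes
 * PS_CLK through unchanged.
 */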
static ulong zynq_clk_get_pll_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
        u32 clk_ctrl, reset, pwrdwn, mul, bypass;

        clk_ctrl = readl(zynq_clk_get_register(id));

        reset = (clk_ctrl & PLLCTRL_RESET_MASK) >> PLLCTRL_RESET_SHIFT;
        pwrdwn = (clk_ctrl & PLLCTRL_PWRDWN_MASK) >> PLLCTRL_PWRDWN_SHIFT;
        if (reset || pwrdwn)
                return 0;

        bypass = clk_ctrl & PLLCTRL_BPFORCE_MASK;
        if (bypass)
                mul = 1;
        else
                mul = (clk_ctrl & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT;

        return priv->ps_clk_freq * mul;
}

#ifndef CONFIG_SPL_BUILD
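/*
 * Report whether a GEM RX clock is taken from the MIO pins or routed
 * in over EMIO, based on the SRCSEL field of the GEM RCLK register.
 */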
static enum zynq_clk_rclk zynq_clk_get_gem_rclk(enum zynq_clk id)
{
        u32 clk_ctrl, srcsel;

        if (id == gem0_clk)
                clk_ctrl = readl(&slcr_base->gem0_rclk_ctrl);
        else
                clk_ctrl = readl(&slcr_base->gem1_rclk_ctrl);

        srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
        if (srcsel)
                return emio_clk;
        else
                return mio_clk;
}
#endif
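
/*
 * Derive a CPU domain clock (6or4x, 3or2x, 2x, 1x) from the selected
 * PLL, the DIV0 field of ARM_CLK_CTRL and the 6:2:1 vs. 4:2:1 ratio
 * mode indicated by CLK_621_TRUE.
 */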
static ulong zynq_clk_get_cpu_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
        u32 clk_621, clk_ctrl, div;
        enum zynq_clk pll;

        clk_ctrl = readl(&slcr_base->arm_clk_ctrl);

        div = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;

        switch (id) {
        case cpu_1x_clk:
                div *= 2;
                /* fall through */
        case cpu_2x_clk:
                clk_621 = readl(&slcr_base->clk_621_true) & 1;
                div *= 2 + clk_621;
                break;
        case cpu_3or2x_clk:
                div *= 2;
                /* fall through */
        case cpu_6or4x_clk:
                break;
        default:
                return 0;
        }

        pll = zynq_clk_get_cpu_pll(clk_ctrl);

        return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, pll), div);
}

#ifndef CONFIG_SPL_BUILD
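/* DDR 2x clock: DDR PLL rate divided by the DIV2X field of DDR_CLK_CTRL. */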
static ulong zynq_clk_get_ddr2x_rate(struct zynq_clk_priv *priv)
{
        u32 clk_ctrl, div;

        clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);
        div = (clk_ctrl & CLK_CTRL_DIV2X_MASK) >> CLK_CTRL_DIV2X_SHIFT;

        return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
}
#endif

static ulong zynq_clk_get_ddr3x_rate(struct zynq_clk_priv *priv)
{
        u32 clk_ctrl, div;

        clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);
        div = (clk_ctrl & CLK_CTRL_DIV3X_MASK) >> CLK_CTRL_DIV3X_SHIFT;

        return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
}

#ifndef CONFIG_SPL_BUILD
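/* DCI clock: DDR PLL rate divided by both divider stages of DCI_CLK_CTRL. */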
static ulong zynq_clk_get_dci_rate(struct zynq_clk_priv *priv)
{
        u32 clk_ctrl, div0, div1;

        clk_ctrl = readl(&slcr_base->dci_clk_ctrl);
        div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
        div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;

        return DIV_ROUND_CLOSEST(DIV_ROUND_CLOSEST(
                zynq_clk_get_pll_rate(priv, ddrpll_clk), div0), div1);
}
#endif
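
/*
 * Generic peripheral clock read-out: the selected PLL rate divided by
 * DIV0 and, for clocks with a second divider stage, by DIV1 as well.
 */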
static ulong zynq_clk_get_peripheral_rate(struct zynq_clk_priv *priv,
                                          enum zynq_clk id, bool two_divs)
{
        enum zynq_clk pll;
        u32 clk_ctrl, div0;
        u32 div1 = 1;

        clk_ctrl = readl(zynq_clk_get_register(id));

        div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
        if (!div0)
                div0 = 1;

#ifndef CONFIG_SPL_BUILD
        if (two_divs) {
                div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
                if (!div1)
                        div1 = 1;
        }
#endif

        pll = zynq_clk_get_peripheral_pll(clk_ctrl);

        return DIV_ROUND_CLOSEST(
                DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, pll), div0),
                div1);
}

#ifndef CONFIG_SPL_BUILD
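/*
 * GEM clock rate: when the RX clock comes from MIO, report the normal
 * two-divider peripheral rate; when it comes from EMIO, defer to the
 * gem*_emio_clk provider looked up at probe time, if any.
 */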
static ulong zynq_clk_get_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
        struct clk *parent;

        if (zynq_clk_get_gem_rclk(id) == mio_clk)
                return zynq_clk_get_peripheral_rate(priv, id, true);

        parent = &priv->gem_emio_clk[id - gem0_clk];
        if (parent->dev)
                return clk_get_rate(parent);

        debug("%s: gem%d emio rx clock source unknown\n", __func__,
              id - gem0_clk);

        return -ENOSYS;
}
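
/*
 * Search the two divider fields for the pair that comes closest to the
 * requested rate; fills in div0/div1 and returns the best achievable
 * rate.
 */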
static unsigned long zynq_clk_calc_peripheral_two_divs(ulong rate,
                                                       ulong pll_rate,
                                                       u32 *div0, u32 *div1)
{
        long new_err, best_err = (long)(~0UL >> 1);
        ulong new_rate, best_rate = 0;
        u32 d0, d1;

        for (d0 = 1; d0 <= ZYNQ_CLK_MAXDIV; d0++) {
                for (d1 = 1; d1 <= ZYNQ_CLK_MAXDIV >> 1; d1++) {
                        new_rate = DIV_ROUND_CLOSEST(
                                DIV_ROUND_CLOSEST(pll_rate, d0), d1);
                        new_err = abs(new_rate - rate);

                        if (new_err < best_err) {
                                *div0 = d0;
                                *div1 = d1;
                                best_err = new_err;
                                best_rate = new_rate;
                        }
                }
        }

        return best_rate;
}
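
/*
 * Program a peripheral clock divider (or divider pair) to approximate
 * the requested rate, unlocking the SLCR only around the register
 * write. Returns the rate actually configured.
 */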
static ulong zynq_clk_set_peripheral_rate(struct zynq_clk_priv *priv,
                                          enum zynq_clk id, ulong rate,
                                          bool two_divs)
{
        enum zynq_clk pll;
        u32 clk_ctrl, div0 = 0, div1 = 0;
        ulong pll_rate, new_rate;
        u32 *reg;

        reg = zynq_clk_get_register(id);
        clk_ctrl = readl(reg);

        pll = zynq_clk_get_peripheral_pll(clk_ctrl);
        pll_rate = zynq_clk_get_pll_rate(priv, pll);
        clk_ctrl &= ~CLK_CTRL_DIV0_MASK;
        if (two_divs) {
                clk_ctrl &= ~CLK_CTRL_DIV1_MASK;
                new_rate = zynq_clk_calc_peripheral_two_divs(rate, pll_rate,
                                                             &div0, &div1);
                clk_ctrl |= div1 << CLK_CTRL_DIV1_SHIFT;
        } else {
                div0 = DIV_ROUND_CLOSEST(pll_rate, rate);
                if (div0 > ZYNQ_CLK_MAXDIV)
                        div0 = ZYNQ_CLK_MAXDIV;
                /* Report the rate that div0 actually produces from the PLL */
                new_rate = DIV_ROUND_CLOSEST(pll_rate, div0);
        }
        clk_ctrl |= div0 << CLK_CTRL_DIV0_SHIFT;

        zynq_slcr_unlock();
        writel(clk_ctrl, reg);
        zynq_slcr_lock();

        return new_rate;
}

static ulong zynq_clk_set_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id,
                                   ulong rate)
{
        struct clk *parent;

        if (zynq_clk_get_gem_rclk(id) == mio_clk)
                return zynq_clk_set_peripheral_rate(priv, id, rate, true);

        parent = &priv->gem_emio_clk[id - gem0_clk];
        if (parent->dev)
                return clk_set_rate(parent, rate);

        debug("%s: gem%d emio rx clock source unknown\n", __func__,
              id - gem0_clk);

        return -ENOSYS;
}
#endif

#ifndef CONFIG_SPL_BUILD
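/* Full (non-SPL) rate read-out, dispatched on the Zynq clock ID. */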
static ulong zynq_clk_get_rate(struct clk *clk)
{
        struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
        enum zynq_clk id = clk->id;
        bool two_divs = false;

        switch (id) {
        case armpll_clk ... iopll_clk:
                return zynq_clk_get_pll_rate(priv, id);
        case cpu_6or4x_clk ... cpu_1x_clk:
                return zynq_clk_get_cpu_rate(priv, id);
        case ddr2x_clk:
                return zynq_clk_get_ddr2x_rate(priv);
        case ddr3x_clk:
                return zynq_clk_get_ddr3x_rate(priv);
        case dci_clk:
                return zynq_clk_get_dci_rate(priv);
        case gem0_clk ... gem1_clk:
                return zynq_clk_get_gem_rate(priv, id);
        case fclk0_clk ... can1_clk:
                two_divs = true;
                /* fall through */
        case dbg_trc_clk ... dbg_apb_clk:
        case lqspi_clk ... pcap_clk:
        case sdio0_clk ... spi1_clk:
                return zynq_clk_get_peripheral_rate(priv, id, two_divs);
        case dma_clk:
                return zynq_clk_get_cpu_rate(priv, cpu_2x_clk);
        case usb0_aper_clk ... swdt_clk:
                return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
        default:
                return -ENXIO;
        }
}

static ulong zynq_clk_set_rate(struct clk *clk, ulong rate)
{
        struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
        enum zynq_clk id = clk->id;
        bool two_divs = false;

        switch (id) {
        case gem0_clk ... gem1_clk:
                return zynq_clk_set_gem_rate(priv, id, rate);
        case fclk0_clk ... can1_clk:
                two_divs = true;
                /* fall through */
        case lqspi_clk ... pcap_clk:
        case sdio0_clk ... spi1_clk:
        case dbg_trc_clk ... dbg_apb_clk:
                return zynq_clk_set_peripheral_rate(priv, id, rate, two_divs);
        default:
                return -ENXIO;
        }
}
#else
static ulong zynq_clk_get_rate(struct clk *clk)
{
        struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
        enum zynq_clk id = clk->id;

        switch (id) {
        case cpu_6or4x_clk ... cpu_1x_clk:
                return zynq_clk_get_cpu_rate(priv, id);
        case ddr3x_clk:
                return zynq_clk_get_ddr3x_rate(priv);
        case lqspi_clk ... pcap_clk:
        case sdio0_clk ... spi1_clk:
                return zynq_clk_get_peripheral_rate(priv, id, 0);
        default:
                return -ENXIO;
        }
}
#endif

static struct clk_ops zynq_clk_ops = {
        .get_rate = zynq_clk_get_rate,
#ifndef CONFIG_SPL_BUILD
        .set_rate = zynq_clk_set_rate,
#endif
};
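
/*
 * Probe: optionally look up the two gem*_emio_clk inputs (their absence
 * is not an error) and read the ps-clk-frequency property, defaulting
 * to 33.333333 MHz.
 */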
static int zynq_clk_probe(struct udevice *dev)
{
        struct zynq_clk_priv *priv = dev_get_priv(dev);
#ifndef CONFIG_SPL_BUILD
        unsigned int i;
        char name[16];
        int ret;

        for (i = 0; i < 2; i++) {
                sprintf(name, "gem%d_emio_clk", i);
                ret = clk_get_by_name(dev, name, &priv->gem_emio_clk[i]);
                if (ret < 0 && ret != -ENODATA) {
                        dev_err(dev, "failed to get %s clock\n", name);
                        return ret;
                }
        }
#endif

        priv->ps_clk_freq = fdtdec_get_uint(gd->fdt_blob, dev_of_offset(dev),
                                            "ps-clk-frequency", 33333333UL);

        return 0;
}

static const struct udevice_id zynq_clk_ids[] = {
        { .compatible = "xlnx,ps7-clkc"},
        {}
};

U_BOOT_DRIVER(zynq_clk) = {
        .name = "zynq_clk",
        .id = UCLASS_CLK,
        .of_match = zynq_clk_ids,
        .flags = DM_FLAG_PRE_RELOC,
        .ops = &zynq_clk_ops,
        .priv_auto_alloc_size = sizeof(struct zynq_clk_priv),
        .probe = zynq_clk_probe,
};