clk-periph-gate.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <soc/tegra/fuse.h>

#include "clk.h"

static DEFINE_SPINLOCK(periph_ref_lock);

/* Macros to assist peripheral gate clock */
#define read_enb(gate) \
	readl_relaxed(gate->clk_base + (gate->regs->enb_reg))
#define write_enb_set(val, gate) \
	writel_relaxed(val, gate->clk_base + (gate->regs->enb_set_reg))
#define write_enb_clr(val, gate) \
	writel_relaxed(val, gate->clk_base + (gate->regs->enb_clr_reg))

#define read_rst(gate) \
	readl_relaxed(gate->clk_base + (gate->regs->rst_reg))
#define write_rst_clr(val, gate) \
	writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))

#define periph_clk_to_bit(gate) (1 << (gate->clk_num % 32))

#define LVL2_CLK_GATE_OVRE 0x554

/* Peripheral gate clock ops */
static int clk_periph_is_enabled(struct clk_hw *hw)
{
	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
	int state = 1;

	if (!(read_enb(gate) & periph_clk_to_bit(gate)))
		state = 0;

	if (!(gate->flags & TEGRA_PERIPH_NO_RESET))
		if (read_rst(gate) & periph_clk_to_bit(gate))
			state = 0;

	return state;
}

static void clk_periph_enable_locked(struct clk_hw *hw)
{
	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);

	write_enb_set(periph_clk_to_bit(gate), gate);
	udelay(2);

	if (!(gate->flags & TEGRA_PERIPH_NO_RESET) &&
	    !(gate->flags & TEGRA_PERIPH_MANUAL_RESET)) {
		if (read_rst(gate) & periph_clk_to_bit(gate)) {
			udelay(5); /* reset propagation delay */
			write_rst_clr(periph_clk_to_bit(gate), gate);
		}
	}

	if (gate->flags & TEGRA_PERIPH_WAR_1005168) {
		writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
		writel_relaxed(BIT(22), gate->clk_base + LVL2_CLK_GATE_OVRE);
		udelay(1);
		writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
	}
}
static void clk_periph_disable_locked(struct clk_hw *hw)
{
	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);

	/*
	 * If the peripheral is on the APB bus, read the APB bus to flush
	 * the pending write on that bus. This avoids accessing the
	 * peripheral after its clock has been disabled.
	 */
	if (gate->flags & TEGRA_PERIPH_ON_APB)
		tegra_read_chipid();

	write_enb_clr(periph_clk_to_bit(gate), gate);
}
static int clk_periph_enable(struct clk_hw *hw)
{
	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
	unsigned long flags = 0;

	spin_lock_irqsave(&periph_ref_lock, flags);

	if (!gate->enable_refcnt[gate->clk_num]++)
		clk_periph_enable_locked(hw);

	spin_unlock_irqrestore(&periph_ref_lock, flags);

	return 0;
}

static void clk_periph_disable(struct clk_hw *hw)
{
	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
	unsigned long flags = 0;

	spin_lock_irqsave(&periph_ref_lock, flags);

	WARN_ON(!gate->enable_refcnt[gate->clk_num]);

	if (--gate->enable_refcnt[gate->clk_num] == 0)
		clk_periph_disable_locked(hw);

	spin_unlock_irqrestore(&periph_ref_lock, flags);
}

static void clk_periph_disable_unused(struct clk_hw *hw)
{
	struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
	unsigned long flags = 0;

	spin_lock_irqsave(&periph_ref_lock, flags);

	/*
	 * Some clocks are duplicated and some of them are marked as critical,
	 * like fuse and fuse_burn for example, thus the enable_refcnt will
	 * be non-zero here if the "unused" duplicate is disabled by CCF.
	 */
	if (!gate->enable_refcnt[gate->clk_num])
		clk_periph_disable_locked(hw);

	spin_unlock_irqrestore(&periph_ref_lock, flags);
}

const struct clk_ops tegra_clk_periph_gate_ops = {
	.is_enabled = clk_periph_is_enabled,
	.enable = clk_periph_enable,
	.disable = clk_periph_disable,
	.disable_unused = clk_periph_disable_unused,
};
struct clk *tegra_clk_register_periph_gate(const char *name,
		const char *parent_name, u8 gate_flags, void __iomem *clk_base,
		unsigned long flags, int clk_num, int *enable_refcnt)
{
	struct tegra_clk_periph_gate *gate;
	struct clk *clk;
	struct clk_init_data init;
	const struct tegra_clk_periph_regs *pregs;

	pregs = get_reg_bank(clk_num);
	if (!pregs)
		return ERR_PTR(-EINVAL);

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate) {
		pr_err("%s: could not allocate periph gate clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;
	init.ops = &tegra_clk_periph_gate_ops;

	gate->magic = TEGRA_CLK_PERIPH_GATE_MAGIC;
	gate->clk_base = clk_base;
	gate->clk_num = clk_num;
	gate->flags = gate_flags;
	gate->enable_refcnt = enable_refcnt;
	gate->regs = pregs;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	gate->hw.init = &init;

	clk = clk_register(NULL, &gate->hw);
	if (IS_ERR(clk))
		kfree(gate);

	return clk;
}
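
For context, a minimal sketch of how a Tegra clock driver could call tegra_clk_register_periph_gate() follows. The clock name, parent name, gate number, and refcount table size are illustrative assumptions, not values taken from this file.

/*
 * Hypothetical usage sketch (not part of clk-periph-gate.c): the clock name,
 * parent name, gate number and refcount table size below are assumed for
 * illustration only.
 */
static int example_periph_refcnt[224];	/* assumed size of the shared refcount table */

static void example_register_uart_gate(void __iomem *car_base)
{
	struct clk *clk;

	/* The gate is marked as sitting on the APB bus, so disabling it
	 * takes the flush-on-disable path in clk_periph_disable_locked(). */
	clk = tegra_clk_register_periph_gate("uarta", "pll_p",
					     TEGRA_PERIPH_ON_APB, car_base,
					     0, 6 /* assumed gate bit */,
					     example_periph_refcnt);
	if (IS_ERR(clk))
		pr_err("%s: failed to register gate: %ld\n",
		       __func__, PTR_ERR(clk));
}

Once registered, clk_periph_enable() and clk_periph_disable() keep the shared enable_refcnt entry for that gate number balanced, so duplicate clocks referencing the same enable bit do not gate each other prematurely.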