clk-gate.c

/*
 * mmp gate clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>

#include "clk.h"

/*
 * Some clocks have multiple bits to enable the clock, and the bits
 * used to disable the clock are not the same as the enable bits.
 */
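
/*
 * Illustrative note (hypothetical values, not taken from any real
 * register layout): such a gate could be described with mask = 0x3,
 * val_enable = 0x3 and val_disable = 0x1, so that enabling sets both
 * control bits while disabling clears only one of them.
 */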

#define to_clk_mmp_gate(hw)	container_of(hw, struct mmp_clk_gate, hw)

static int mmp_clk_gate_enable(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	unsigned long rate;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);
	tmp &= ~gate->mask;
	tmp |= gate->val_enable;
	writel(tmp, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);

	if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
		rate = clk_hw_get_rate(hw);
		/* Wait 2 clock cycles: 2 / rate s == 2000000 / rate us. */
		udelay(2000000/rate);
	}

	return 0;
}

static void mmp_clk_gate_disable(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);
	tmp &= ~gate->mask;
	tmp |= gate->val_disable;
	writel(tmp, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
}

static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);

	return (tmp & gate->mask) == gate->val_enable;
}

const struct clk_ops mmp_clk_gate_ops = {
	.enable = mmp_clk_gate_enable,
	.disable = mmp_clk_gate_disable,
	.is_enabled = mmp_clk_gate_is_enabled,
};

struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
		unsigned int gate_flags, spinlock_t *lock)
{
	struct mmp_clk_gate *gate;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &mmp_clk_gate_ops;
	init.flags = flags;
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct mmp_clk_gate assignments */
	gate->reg = reg;
	gate->mask = mask;
	gate->val_enable = val_enable;
	gate->val_disable = val_disable;
	gate->flags = gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	clk = clk_register(dev, &gate->hw);

	if (IS_ERR(clk))
		kfree(gate);

	return clk;
}
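
/*
 * Hedged usage sketch (illustrative only, not part of the original file):
 * a hypothetical SoC clock-init path could register a gate of this type as
 * below. The register offset, mask/value pair, clock names and the
 * example_clk_lock spinlock are made-up assumptions chosen purely for
 * illustration; kept inside #if 0 so it is never built.
 */
#if 0
static DEFINE_SPINLOCK(example_clk_lock);

static void __init example_clk_gate_init(void __iomem *apbc_base)
{
	struct clk *clk;

	/* Two control bits: both set to enable, only bit 0 left set to disable. */
	clk = mmp_clk_register_gate(NULL, "example-gate", "example-parent",
				    0, apbc_base + 0x10,
				    0x3, 0x3, 0x1,
				    MMP_CLK_GATE_NEED_DELAY, &example_clk_lock);
	if (IS_ERR(clk))
		pr_err("failed to register example gate clock\n");
}
#endif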