timer-mp-csky.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <asm/reg_ops.h>

#include "timer-of.h"

/* Per-cpu private timer control registers, accessed with mfcr/mtcr */
#define PTIM_CCVR	"cr<3, 14>"	/* current counter value */
#define PTIM_CTLR	"cr<0, 14>"	/* control: write 1 to run, 0 to stop */
#define PTIM_LVR	"cr<6, 14>"	/* limit value for the next event */
#define PTIM_TSR	"cr<1, 14>"	/* timer status: write 0 to ack the irq */
static int csky_mptimer_irq;

static int csky_mptimer_set_next_event(unsigned long delta,
				       struct clock_event_device *ce)
{
	mtcr(PTIM_LVR, delta);

	return 0;
}

static int csky_mptimer_shutdown(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}

static int csky_mptimer_oneshot(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 1);

	return 0;
}

static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}

static DEFINE_PER_CPU(struct timer_of, csky_to) = {
	.flags = TIMER_OF_CLOCK,
	.clkevt = {
		.rating = 300,
		.features = CLOCK_EVT_FEAT_PERCPU |
			    CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown = csky_mptimer_shutdown,
		.set_state_oneshot = csky_mptimer_oneshot,
		.set_state_oneshot_stopped = csky_mptimer_oneshot_stopped,
		.set_next_event = csky_mptimer_set_next_event,
	},
};
static irqreturn_t csky_timer_interrupt(int irq, void *dev)
{
	struct timer_of *to = this_cpu_ptr(&csky_to);

	/* Clear the timer status register to ack the interrupt */
	mtcr(PTIM_TSR, 0);

	to->clkevt.event_handler(&to->clkevt);

	return IRQ_HANDLED;
}
/*
 * clock event for percpu
 */
static int csky_mptimer_starting_cpu(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);

	to->clkevt.cpumask = cpumask_of(cpu);

	enable_percpu_irq(csky_mptimer_irq, 0);

	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
					2, ULONG_MAX);

	return 0;
}

static int csky_mptimer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(csky_mptimer_irq);

	return 0;
}

/*
 * clock source
 */
static u64 notrace sched_clock_read(void)
{
	return (u64)mfcr(PTIM_CCVR);
}

static u64 clksrc_read(struct clocksource *c)
{
	return (u64)mfcr(PTIM_CCVR);
}

struct clocksource csky_clocksource = {
	.name = "csky",
	.rating = 400,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.read = clksrc_read,
};
static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * csky_mptimer is designed for C-SKY SMP multi-processors, and
	 * every core has its own private irq and regs for the clkevt and
	 * clksrc.
	 *
	 * The regs are accessed with the cpu instructions mfcr/mtcr instead
	 * of an mmio mapping, so we don't need an mmio address in the dts,
	 * but we still need to give the clk and irq number.
	 *
	 * The mptimer uses a private irq, and the irq number is the same
	 * for every core, so we call request_percpu_irq() once here instead
	 * of using timer_of's irq handling.
	 */
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;

	return 0;

rollback:
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	return -EINVAL;
}
TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);
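The comment in csky_mptimer_init() notes that the timer registers are reached with the C-SKY mfcr/mtcr control-register instructions rather than through a memory-mapped window, which is why the dts node needs only a clock and an irq. Purely as an illustration of that access style, wrappers in the spirit of the mfcr()/mtcr() helpers from <asm/reg_ops.h> might look roughly like the sketch below; the sketch_mfcr()/sketch_mtcr() names and the exact asm constraints are assumptions for illustration, not the kernel's actual definitions.

/*
 * Hypothetical sketch only: inline-asm wrappers around the C-SKY
 * control-register instructions, in the spirit of <asm/reg_ops.h>.
 */
#define sketch_mfcr(reg)				\
({							\
	unsigned int __val;				\
	asm volatile("mfcr %0, " reg "\n"		\
		     : "=r"(__val) : : "memory");	\
	__val;						\
})

#define sketch_mtcr(reg, val)				\
({							\
	asm volatile("mtcr %0, " reg "\n"		\
		     : : "r"(val) : "memory");		\
})

With wrappers like these, a call such as mtcr(PTIM_LVR, delta) in csky_mptimer_set_next_event() compiles down to a single control-register write on the local core, with no ioremap() or MMIO access involved.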