cpufreq_times.c
/* drivers/cpufreq/cpufreq_times.c
 *
 * Copyright (C) 2018 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <trace/hooks/cpufreq.h>

static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */

/**
 * struct cpu_freqs - per-cpu frequency information
 * @offset: start of these freqs' stats in task time_in_state array
 * @max_state: number of entries in freq_table
 * @last_index: index in freq_table of last frequency switched to
 * @freq_table: list of available frequencies
 */
struct cpu_freqs {
	unsigned int offset;
	unsigned int max_state;
	unsigned int last_index;
	unsigned int freq_table[0];
};

/* One cpu_freqs per policy, shared by all related CPUs, indexed by CPU */
static struct cpu_freqs *all_freqs[NR_CPUS];

/* Total number of frequency states tracked; size of per-task arrays */
static unsigned int next_offset;

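/* Reset a task's time_in_state tracking state; no array is allocated yet */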
void cpufreq_task_times_init(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	p->max_state = 0;
}

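/* Allocate the per-task time_in_state array, sized to all known freq states */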
void cpufreq_task_times_alloc(struct task_struct *p)
{
	void *temp;
	unsigned long flags;
	unsigned int max_state = READ_ONCE(next_offset);

	/* We use one array to avoid multiple allocs per task */
	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
	if (!temp)
		return;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = temp;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	p->max_state = max_state;
}

/* Caller must hold task_time_in_state_lock */
static int cpufreq_task_times_realloc_locked(struct task_struct *p)
{
	void *temp;
	unsigned int max_state = READ_ONCE(next_offset);

	temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
	if (!temp)
		return -ENOMEM;
	p->time_in_state = temp;
	memset(p->time_in_state + p->max_state, 0,
	       (max_state - p->max_state) * sizeof(u64));
	p->max_state = max_state;
	return 0;
}

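/* Release a task's time_in_state array when the task goes away */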
void cpufreq_task_times_exit(struct task_struct *p)
{
	unsigned long flags;
	void *temp;

	if (!p->time_in_state)
		return;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	temp = p->time_in_state;
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	kfree(temp);
}

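/*
 * seq_file handler: for each policy, print the time (in clock_t units) the
 * task has spent running at each available frequency.
 */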
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
	struct pid *pid, struct task_struct *p)
{
	unsigned int cpu, i;
	u64 cputime;
	unsigned long flags;
	struct cpu_freqs *freqs;
	struct cpu_freqs *last_freqs = NULL;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	for_each_possible_cpu(cpu) {
		freqs = all_freqs[cpu];
		if (!freqs || freqs == last_freqs)
			continue;
		last_freqs = freqs;
		seq_printf(m, "cpu%u\n", cpu);
		for (i = 0; i < freqs->max_state; i++) {
			cputime = 0;
			if (freqs->offset + i < p->max_state &&
			    p->time_in_state)
				cputime = p->time_in_state[freqs->offset + i];
			seq_printf(m, "%u %lu\n", freqs->freq_table[i],
				   (unsigned long)nsec_to_clock_t(cputime));
		}
	}
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	return 0;
}

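/* Account cputime (ns) to the frequency state the task's CPU last switched to */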
void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
{
	unsigned long flags;
	unsigned int state;
	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];

	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
		return;

	state = freqs->offset + READ_ONCE(freqs->last_index);

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
	    p->time_in_state)
		p->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);

	trace_android_vh_cpufreq_acct_update_power(cputime, p, state);
}

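/* Return the freq_table index for @freq, or -1 if the frequency is unknown */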
static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
{
	int index;

	for (index = 0; index < freqs->max_state; ++index) {
		if (freqs->freq_table[index] == freq)
			return index;
	}
	return -1;
}

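/*
 * Build the cpu_freqs table for a new policy: count the valid frequencies,
 * record them, reserve this policy's offset into per-task arrays, and share
 * the table among all related CPUs.
 */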
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
{
	int cpu, index = 0;
	unsigned int count = 0;
	struct cpufreq_frequency_table *pos, *table;
	struct cpu_freqs *freqs;
	void *tmp;

	if (all_freqs[policy->cpu])
		return;

	table = policy->freq_table;
	if (!table)
		return;

	cpufreq_for_each_valid_entry(pos, table)
		count++;

	tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
		      GFP_KERNEL);
	if (!tmp)
		return;

	freqs = tmp;
	freqs->max_state = count;

	cpufreq_for_each_valid_entry(pos, table)
		freqs->freq_table[index++] = pos->frequency;

	index = cpufreq_times_get_index(freqs, policy->cur);
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);

	freqs->offset = next_offset;
	WRITE_ONCE(next_offset, freqs->offset + count);
	for_each_cpu(cpu, policy->related_cpus)
		all_freqs[cpu] = freqs;
}

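/* Update last_index when the policy switches to a new frequency */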
void cpufreq_times_record_transition(struct cpufreq_policy *policy,
	unsigned int new_freq)
{
	int index;
	struct cpu_freqs *freqs = all_freqs[policy->cpu];

	if (!freqs)
		return;

	index = cpufreq_times_get_index(freqs, new_freq);
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);
}