timing.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"
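
/*
 * Reset the per-vcpu exit timing statistics: clear counters, sums and maxima,
 * prime the minima, and mark the last exit type with the 0xDEAD sentinel so
 * the next exit is treated as the start of a fresh measurement cycle.
 */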
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
	int i;

	/* Take a lock to avoid concurrent updates */
	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.last_exit_type = 0xDEAD;
	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		vcpu->arch.timing_count_type[i] = 0;
		vcpu->arch.timing_max_duration[i] = 0;
		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
		vcpu->arch.timing_sum_duration[i] = 0;
		vcpu->arch.timing_sum_quad_duration[i] = 0;
	}

	vcpu->arch.timing_last_exit = 0;
	vcpu->arch.timing_exit.tv64 = 0;
	vcpu->arch.timing_last_enter.tv64 = 0;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}
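
/*
 * Fold one duration sample (in timebase ticks) into the statistics for the
 * given exit type: bump the count, update the sum and the sum of squares,
 * track the minimum and maximum, and warn if either running sum wraps.
 */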
static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
	u64 old;

	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.timing_count_type[type]++;

	/* sum */
	old = vcpu->arch.timing_sum_duration[type];
	vcpu->arch.timing_sum_duration[type] += duration;
	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
		printk(KERN_ERR "%s - wrap adding sum of durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old, vcpu->arch.timing_sum_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* square sum */
	old = vcpu->arch.timing_sum_quad_duration[type];
	vcpu->arch.timing_sum_quad_duration[type] += (duration * duration);
	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
		printk(KERN_ERR "%s - wrap adding sum of squared durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old,
			vcpu->arch.timing_sum_quad_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* set min/max */
	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
		vcpu->arch.timing_min_duration[type] = duration;
	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
		vcpu->arch.timing_max_duration[type] = duration;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}
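
/*
 * Account the just-completed exit/entry cycle: the interval from the previous
 * exit to the last guest entry is charged to the recorded exit type (time
 * spent in the host), and the interval from that entry to the current exit is
 * charged to TIMEINGUEST.
 */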
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return; /* skip incomplete cycle (e.g. after reset) */

	/* update statistics for average and standard deviation */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}
static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
	[MMIO_EXITS] =              "MMIO",
	[SIGNAL_EXITS] =            "SIGNAL",
	[ITLB_REAL_MISS_EXITS] =    "ITLBREAL",
	[ITLB_VIRT_MISS_EXITS] =    "ITLBVIRT",
	[DTLB_REAL_MISS_EXITS] =    "DTLBREAL",
	[DTLB_VIRT_MISS_EXITS] =    "DTLBVIRT",
	[SYSCALL_EXITS] =           "SYSCALL",
	[ISI_EXITS] =               "ISI",
	[DSI_EXITS] =               "DSI",
	[EMULATED_INST_EXITS] =     "EMULINST",
	[EMULATED_MTMSRWE_EXITS] =  "EMUL_WAIT",
	[EMULATED_WRTEE_EXITS] =    "EMUL_WRTEE",
	[EMULATED_MTSPR_EXITS] =    "EMUL_MTSPR",
	[EMULATED_MFSPR_EXITS] =    "EMUL_MFSPR",
	[EMULATED_MTMSR_EXITS] =    "EMUL_MTMSR",
	[EMULATED_MFMSR_EXITS] =    "EMUL_MFMSR",
	[EMULATED_TLBSX_EXITS] =    "EMUL_TLBSX",
	[EMULATED_TLBWE_EXITS] =    "EMUL_TLBWE",
	[EMULATED_RFI_EXITS] =      "EMUL_RFI",
	[DEC_EXITS] =               "DEC",
	[EXT_INTR_EXITS] =          "EXTINT",
	[HALT_WAKEUP] =             "HALT",
	[USR_PR_INST] =             "USR_PR_INST",
	[FP_UNAVAIL] =              "FP_UNAVAIL",
	[DEBUG_EXITS] =             "DEBUG",
	[TIMEINGUEST] =             "TIMEINGUEST"
};
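
/*
 * seq_file show callback: emit one row per exit type, scaling the raw
 * timebase tick counters by tb_ticks_per_usec.
 */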
static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
	struct kvm_vcpu *vcpu = m->private;
	int i;
	u64 min, max, sum, sum_quad;

	seq_puts(m, "type count min max sum sum_squared\n");

	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		min = vcpu->arch.timing_min_duration[i];
		do_div(min, tb_ticks_per_usec);
		max = vcpu->arch.timing_max_duration[i];
		do_div(max, tb_ticks_per_usec);
		sum = vcpu->arch.timing_sum_duration[i];
		do_div(sum, tb_ticks_per_usec);
		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
		do_div(sum_quad, tb_ticks_per_usec);

		seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
			kvm_exit_names[i],
			vcpu->arch.timing_count_type[i],
			min,
			max,
			sum,
			sum_quad);
	}

	return 0;
}
/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	int err = -EINVAL;
	char c;

	if (count > 1) {
		goto done;
	}

	if (get_user(c, user_buf)) {
		err = -EFAULT;
		goto done;
	}

	if (c == 'c') {
		struct seq_file *seqf = file->private_data;
		struct kvm_vcpu *vcpu = seqf->private;
		/* Write does not affect our buffers previously generated with
		 * show. seq_file is locked here to prevent races of init with
		 * a show call */
		mutex_lock(&seqf->lock);
		kvmppc_init_timing_stats(vcpu);
		mutex_unlock(&seqf->lock);
		err = count;
	}

done:
	return err;
}
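
/* Hand the vcpu stashed in i_private through to the show callback. */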
static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
	.owner   = THIS_MODULE,
	.open    = kvmppc_exit_timing_open,
	.read    = seq_read,
	.write   = kvmppc_exit_timing_write,
	.llseek  = seq_lseek,
	.release = single_release,
};
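
/*
 * Create the per-vcpu "vm<pid>_vcpu<id>_timing" file under the KVM debugfs
 * directory and remember the dentry so it can be removed later.
 */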
void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
	static char dbg_fname[50];
	struct dentry *debugfs_file;

	snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
		 current->pid, id);
	debugfs_file = debugfs_create_file(dbg_fname, 0666, kvm_debugfs_dir,
					   vcpu, &kvmppc_exit_timing_fops);

	vcpu->arch.debugfs_exit_timing = debugfs_file;
}
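
/* Tear down the per-vcpu timing file created above. */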
void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	debugfs_remove(vcpu->arch.debugfs_exit_timing);
	vcpu->arch.debugfs_exit_timing = NULL;
}