pmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance the AMD RDPMC instruction uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index to
 *      gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping relationship
 *      between pmc and perf counters is as the following:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
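/*
 * A worked example of the mapping above (illustrative values, assuming a
 * guest with 4 gp and 3 fixed counters): the gp counters get kvm_pmc.idx
 * 0..3, while the fixed counters get kvm_pmc.idx INTEL_PMC_IDX_FIXED + 0..2,
 * so the global index space never overlaps even though the MSR-level "idx"
 * used by RDPMC restarts at 0 for fixed counters (selected on Intel by
 * setting bit 30 of ECX).
 */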
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        kvm_pmu_deliver_pmi(vcpu);
}
static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
        }
}
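/*
 * Like kvm_perf_overflow(), but additionally delivers a PMI to the guest:
 * directly via KVM_REQ_PMI if the overflow NMI interrupted guest mode,
 * otherwise via irq_work so that a halted vCPU gets woken up (see the
 * comment below).
 */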
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

                /*
                 * Inject PMI. If vcpu was in a guest mode during NMI, PMI
                 * can be injected on a guest mode re-entry. Otherwise we can't
                 * be sure that vcpu wasn't executing hlt instruction at the
                 * time of vmexit and is not going to re-enter guest mode until
                 * woken up. So we should wake it, but this is impossible from
                 * NMI context. Do it from irq work instead.
                 */
                if (!kvm_is_in_guest())
                        irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
                else
                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
        }
}
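/*
 * (Re)create the perf_event that backs a vPMC: the event is pinned, counts
 * only while the guest is running (exclude_host), and uses the interrupting
 * overflow handler when the guest asked for a PMI on overflow.
 */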
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  u64 config, bool exclude_user,
                                  bool exclude_kernel, bool intr,
                                  bool in_tx, bool in_tx_cp)
{
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };

        attr.sample_period = get_sample_period(pmc, pmc->counter);

        if (in_tx)
                attr.config |= HSW_IN_TX;
        if (in_tx_cp) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
                attr.config |= HSW_IN_TX_CHECKPOINTED;
        }

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
                                     PTR_ERR(event), pmc->idx);
                return;
        }

        pmc->perf_event = event;
        pmc_to_pmu(pmc)->event_count++;
        clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
}
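/*
 * pmc_pause_counter() and pmc_resume_counter() let reprogram_*_counter()
 * reuse an existing perf_event when the guest rewrites an unchanged config:
 * pause folds the accumulated count into pmc->counter and stops the event,
 * resume re-validates the sample period and simply re-enables it, avoiding
 * a needless destroy/create cycle.
 */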
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
        u64 counter = pmc->counter;

        if (!pmc->perf_event)
                return;

        /* update counter, reset event value to avoid redundant accumulation */
        counter += perf_event_pause(pmc->perf_event, true);
        pmc->counter = counter & pmc_bitmask(pmc);
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event)
                return false;

        /* recalibrate sample period and check if it's accepted by perf core */
        if (perf_event_period(pmc->perf_event,
                              get_sample_period(pmc, pmc->counter)))
                return false;

        /* reuse perf_event to serve as pmc_reprogram_counter() does */
        perf_event_enable(pmc->perf_event);

        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
        return true;
}
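/*
 * Program a gp counter from the EVENTSEL value the guest just wrote:
 * honour the per-VM event filter, then map the selector either to a
 * generic PERF_TYPE_HARDWARE event or fall back to a raw event before
 * handing it to pmc_reprogram_counter().
 */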
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
        u64 config;
        u32 type = PERF_TYPE_RAW;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
        int i;
        bool allow_event = true;

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");

        pmc->eventsel = eventsel;

        pmc_pause_counter(pmc);

        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;

        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (filter) {
                for (i = 0; i < filter->nevents; i++)
                        if (filter->events[i] ==
                            (eventsel & AMD64_RAW_EVENT_MASK_NB))
                                break;
                if (filter->action == KVM_PMU_EVENT_ALLOW &&
                    i == filter->nevents)
                        allow_event = false;
                if (filter->action == KVM_PMU_EVENT_DENY &&
                    i < filter->nevents)
                        allow_event = false;
        }
        if (!allow_event)
                return;

        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
                          ARCH_PERFMON_EVENTSEL_INV |
                          ARCH_PERFMON_EVENTSEL_CMASK |
                          HSW_IN_TX |
                          HSW_IN_TX_CHECKPOINTED))) {
                config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
                if (config != PERF_COUNT_HW_MAX)
                        type = PERF_TYPE_HARDWARE;
        }

        if (type == PERF_TYPE_RAW)
                config = eventsel & AMD64_RAW_EVENT_MASK;

        if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
                return;

        pmc_release_perf_event(pmc);

        pmc->current_config = eventsel;
        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                              eventsel & ARCH_PERFMON_EVENTSEL_INT,
                              (eventsel & HSW_IN_TX),
                              (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
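/*
 * Program a fixed counter. "ctrl" is this counter's 4-bit field from the
 * fixed counter control MSR: bits 0-1 select OS/USR counting and bit 3
 * requests a PMI on overflow; the event itself is implied by the counter's
 * index.
 */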
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
        unsigned en_field = ctrl & 0x3;
        bool pmi = ctrl & 0x8;
        struct kvm_pmu_event_filter *filter;
        struct kvm *kvm = pmc->vcpu->kvm;

        pmc_pause_counter(pmc);

        if (!en_field || !pmc_is_enabled(pmc))
                return;

        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (filter) {
                if (filter->action == KVM_PMU_EVENT_DENY &&
                    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
                        return;
                if (filter->action == KVM_PMU_EVENT_ALLOW &&
                    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
                        return;
        }

        if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
                return;

        pmc_release_perf_event(pmc);

        pmc->current_config = (u64)ctrl;
        pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                              kvm_x86_ops.pmu_ops->find_fixed_event(idx),
                              !(en_field & 0x2), /* exclude user */
                              !(en_field & 0x1), /* exclude kernel */
                              pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
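/*
 * Reprogram a counter by its global index: gp counters reuse the eventsel
 * last written by the guest, fixed counters re-read their control field
 * from fixed_ctr_ctrl.
 */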
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
        struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

        if (!pmc)
                return;

        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
                int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
                u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

                reprogram_fixed_counter(pmc, ctrl, idx);
        }
}
EXPORT_SYMBOL_GPL(reprogram_counter);
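/*
 * Handle a pending KVM_REQ_PMU: reprogram every counter flagged in
 * reprogram_pmi (set by the overflow handlers and by guest MSR writes),
 * then release unused perf_events if cleanup was requested.
 */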
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int bit;

        for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

                if (unlikely(!pmc || !pmc->perf_event)) {
                        clear_bit(bit, pmu->reprogram_pmi);
                        continue;
                }

                reprogram_counter(pmu, bit);
        }

        /*
         * Unused perf_events are only released if the corresponding MSRs
         * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
         * triggers KVM_REQ_PMU if cleanup is needed.
         */
        if (unlikely(pmu->need_cleanup))
                kvm_pmu_cleanup(vcpu);
}
/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
        switch (pmc_idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                return true;
        }
        return false;
}
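/*
 * VMware's backdoor "counters" are not real PMCs: they report the host TSC,
 * boot-based real time, and guest-apparent time (real time adjusted by the
 * kvmclock offset) respectively.
 */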
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        u64 ctr_val;

        switch (idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
                ctr_val = rdtsc();
                break;
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
                ctr_val = ktime_get_boottime_ns();
                break;
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                ctr_val = ktime_get_boottime_ns() +
                        vcpu->kvm->arch.kvmclock_offset;
                break;
        default:
                return 1;
        }

        *data = ctr_val;
        return 0;
}
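/*
 * Emulate RDPMC: bit 31 of the guest's ECX selects a "fast" read that
 * returns only the low 32 bits, and the usual CR4.PCE / CPL / CR0.PE
 * checks are applied before the counter is read. Returns non-zero if the
 * access should fault.
 */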
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        bool fast_mode = idx & (1u << 31);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 mask = fast_mode ? ~0u : ~0ull;

        if (!pmu->version)
                return 1;

        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

        pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
        if (!pmc)
                return 1;

        if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
            (kvm_x86_ops.get_cpl(vcpu) != 0) &&
            (kvm_read_cr0(vcpu) & X86_CR0_PE))
                return 1;

        *data = pmc_read_counter(pmc) & mask;
        return 0;
}
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu))
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
                kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

        if (pmc)
                __set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
        return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}
/* Refresh PMU settings. This function is generally called when the
 * underlying settings are changed (such as changes of the PMU CPUID by
 * guest VMs), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops.pmu_ops->refresh(vcpu);
}
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        irq_work_sync(&pmu->irq_work);
        kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        memset(pmu, 0, sizeof(*pmu));
        kvm_x86_ops.pmu_ops->init(vcpu);
        init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
        pmu->event_count = 0;
        pmu->need_cleanup = false;
        kvm_pmu_refresh(vcpu);
}
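/*
 * A counter is "speculatively" in use if the guest has enabled it via its
 * eventsel (gp) or fixed control field (fixed), even when no perf_event is
 * currently backing it; kvm_pmu_cleanup() keeps such counters alive.
 */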
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (pmc_is_fixed(pmc))
                return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                        pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

        return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = NULL;
        DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
        int i;

        pmu->need_cleanup = false;

        bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
                      pmu->pmc_in_use, X86_PMC_IDX_MAX);

        for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
                pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

                if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
                        pmc_stop_counter(pmc);
        }

        bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}
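/*
 * Handler for the KVM_SET_PMU_EVENT_FILTER ioctl: validate and copy the
 * user-supplied filter, publish it under kvm->lock, then wait for SRCU
 * readers (reprogram_*_counter()) to finish before freeing the old filter.
 */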
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
        struct kvm_pmu_event_filter tmp, *filter;
        size_t size;
        int r;

        if (copy_from_user(&tmp, argp, sizeof(tmp)))
                return -EFAULT;

        if (tmp.action != KVM_PMU_EVENT_ALLOW &&
            tmp.action != KVM_PMU_EVENT_DENY)
                return -EINVAL;

        if (tmp.flags != 0)
                return -EINVAL;

        if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
                return -E2BIG;

        size = struct_size(filter, events, tmp.nevents);
        filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
        if (!filter)
                return -ENOMEM;

        r = -EFAULT;
        if (copy_from_user(filter, argp, size))
                goto cleanup;

        /* Ensure nevents can't be changed between the user copies. */
        *filter = tmp;

        mutex_lock(&kvm->lock);
        filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
                                     mutex_is_locked(&kvm->lock));
        mutex_unlock(&kvm->lock);

        synchronize_srcu_expedited(&kvm->srcu);
        r = 0;
cleanup:
        kfree(filter);
        return r;
}