trace_pr.h

/* SPDX-License-Identifier: GPL-2.0 */

#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_PR_H

#include <linux/tracepoint.h>
#include "trace_book3s.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
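
/*
 * Logged when the exit handler decides to re-enter the guest; records the
 * handler's return code and the guest PC at that point.
 */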
TRACE_EVENT(kvm_book3s_reenter,
	TP_PROTO(int r, struct kvm_vcpu *vcpu),
	TP_ARGS(r, vcpu),

	TP_STRUCT__entry(
		__field( unsigned int, r )
		__field( unsigned long, pc )
	),

	TP_fast_assign(
		__entry->r = r;
		__entry->pc = kvmppc_get_pc(vcpu);
	),

	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);

#ifdef CONFIG_PPC_BOOK3S_64
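
/*
 * Book3S-64 only: logged when a guest translation is entered into the host
 * hash page table.  Records write/execute permission flags derived from the
 * HPTE bits, the guest effective address, the target HPTE group, the
 * virtual address, the virtual page number and the host physical address.
 */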
TRACE_EVENT(kvm_book3s_64_mmu_map,
	TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
		 struct kvmppc_pte *orig_pte),
	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

	TP_STRUCT__entry(
		__field( unsigned char, flag_w )
		__field( unsigned char, flag_x )
		__field( unsigned long, eaddr )
		__field( unsigned long, hpteg )
		__field( unsigned long, va )
		__field( unsigned long long, vpage )
		__field( unsigned long, hpaddr )
	),

	TP_fast_assign(
		__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
		__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
		__entry->eaddr = orig_pte->eaddr;
		__entry->hpteg = hpteg;
		__entry->va = va;
		__entry->vpage = orig_pte->vpage;
		__entry->hpaddr = hpaddr;
	),

	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */
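
/*
 * Logged when a shadow PTE is inserted into the vcpu's hpte_cache.  Records
 * the host virtual page number, host page frame number, guest effective and
 * real addresses, virtual page number and an r/w/x permission mask.
 */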
TRACE_EVENT(kvm_book3s_mmu_map,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field( u64, host_vpn )
		__field( u64, pfn )
		__field( ulong, eaddr )
		__field( u64, vpage )
		__field( ulong, raddr )
		__field( int, flags )
	),

	TP_fast_assign(
		__entry->host_vpn = pte->host_vpn;
		__entry->pfn = pte->pfn;
		__entry->eaddr = pte->pte.eaddr;
		__entry->vpage = pte->pte.vpage;
		__entry->raddr = pte->pte.raddr;
		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
				 (pte->pte.may_write ? 0x2 : 0) |
				 (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);
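
/*
 * Logged when a cached shadow PTE is invalidated; records the same fields
 * as kvm_book3s_mmu_map.
 */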
TRACE_EVENT(kvm_book3s_mmu_invalidate,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field( u64, host_vpn )
		__field( u64, pfn )
		__field( ulong, eaddr )
		__field( u64, vpage )
		__field( ulong, raddr )
		__field( int, flags )
	),

	TP_fast_assign(
		__entry->host_vpn = pte->host_vpn;
		__entry->pfn = pte->pfn;
		__entry->eaddr = pte->pte.eaddr;
		__entry->vpage = pte->pte.vpage;
		__entry->raddr = pte->pte.raddr;
		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
				 (pte->pte.may_write ? 0x2 : 0) |
				 (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);
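
/*
 * Logged on a bulk shadow-PTE flush: records the current hpte_cache_count,
 * a string describing the kind of flush, and the two range parameters that
 * bound it.
 */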
TRACE_EVENT(kvm_book3s_mmu_flush,
	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
		 unsigned long long p2),
	TP_ARGS(type, vcpu, p1, p2),

	TP_STRUCT__entry(
		__field( int, count )
		__field( unsigned long long, p1 )
		__field( unsigned long long, p2 )
		__field( const char *, type )
	),

	TP_fast_assign(
		__entry->count = to_book3s(vcpu)->hpte_cache_count;
		__entry->p1 = p1;
		__entry->p2 = p2;
		__entry->type = type;
	),

	TP_printk("Flush %d %sPTEs: %llx - %llx",
		  __entry->count, __entry->type, __entry->p1, __entry->p2)
);
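
/*
 * Logged when a guest VSID is successfully looked up in the SID map;
 * records the guest VSID and the host VSID it translates to.
 */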
TRACE_EVENT(kvm_book3s_slb_found,
	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
	TP_ARGS(gvsid, hvsid),

	TP_STRUCT__entry(
		__field( unsigned long long, gvsid )
		__field( unsigned long long, hvsid )
	),

	TP_fast_assign(
		__entry->gvsid = gvsid;
		__entry->hvsid = hvsid;
	),

	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);
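
/*
 * Logged when a SID-map lookup fails; records the map index and the guest
 * VSID that could not be translated.
 */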
TRACE_EVENT(kvm_book3s_slb_fail,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
	TP_ARGS(sid_map_mask, gvsid),

	TP_STRUCT__entry(
		__field( unsigned short, sid_map_mask )
		__field( unsigned long long, gvsid )
	),

	TP_fast_assign(
		__entry->sid_map_mask = sid_map_mask;
		__entry->gvsid = gvsid;
	),

	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);
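
/*
 * Logged when a new SID-map entry is created; records the map index, the
 * guest VSID and the host VSID assigned to it.
 */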
TRACE_EVENT(kvm_book3s_slb_map,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
		 unsigned long long hvsid),
	TP_ARGS(sid_map_mask, gvsid, hvsid),

	TP_STRUCT__entry(
		__field( unsigned short, sid_map_mask )
		__field( unsigned long long, guest_vsid )
		__field( unsigned long long, host_vsid )
	),

	TP_fast_assign(
		__entry->sid_map_mask = sid_map_mask;
		__entry->guest_vsid = gvsid;
		__entry->host_vsid = hvsid;
	),

	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
		  __entry->guest_vsid, __entry->host_vsid)
);
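
/*
 * Logged when an SLB entry is written on the guest's behalf (slbmte);
 * records the VSID and ESID words of the entry.
 */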
TRACE_EVENT(kvm_book3s_slbmte,
	TP_PROTO(u64 slb_vsid, u64 slb_esid),
	TP_ARGS(slb_vsid, slb_esid),

	TP_STRUCT__entry(
		__field( u64, slb_vsid )
		__field( u64, slb_esid )
	),

	TP_fast_assign(
		__entry->slb_vsid = slb_vsid;
		__entry->slb_esid = slb_esid;
	),

	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);
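
/*
 * Logged on every guest exit: records the exit reason (printed symbolically
 * via kvm_trace_symbol_exit), the guest PC, MSR, fault DAR, shadow SRR1 and
 * the last guest instruction.
 */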
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
	TP_ARGS(exit_nr, vcpu),

	TP_STRUCT__entry(
		__field( unsigned int, exit_nr )
		__field( unsigned long, pc )
		__field( unsigned long, msr )
		__field( unsigned long, dar )
		__field( unsigned long, srr1 )
		__field( unsigned long, last_inst )
	),

	TP_fast_assign(
		__entry->exit_nr = exit_nr;
		__entry->pc = kvmppc_get_pc(vcpu);
		__entry->dar = kvmppc_get_fault_dar(vcpu);
		__entry->msr = kvmppc_get_msr(vcpu);
		__entry->srr1 = vcpu->arch.shadow_srr1;
		__entry->last_inst = vcpu->arch.last_inst;
	),

	TP_printk("exit=%s"
		" | pc=0x%lx"
		" | msr=0x%lx"
		" | dar=0x%lx"
		" | srr1=0x%lx"
		" | last_inst=0x%lx"
		,
		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
		__entry->pc,
		__entry->msr,
		__entry->dar,
		__entry->srr1,
		__entry->last_inst
		)
);
#endif /* _TRACE_KVM_PR_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr
#include <trace/define_trace.h>
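
/*
 * Usage sketch (assuming the standard Linux tracepoint pattern; the actual
 * call sites live elsewhere in the KVM PR code):
 *
 *	// In exactly one .c file, before including this header:
 *	#define CREATE_TRACE_POINTS
 *	#include "trace_pr.h"
 *
 *	// At the point being traced, call the generated helper, e.g.:
 *	trace_kvm_book3s_reenter(r, vcpu);
 *
 * Each TRACE_EVENT(name, ...) above expands to a trace_name() function with
 * the TP_PROTO() signature shown for it.
 */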