blob: e8e2b9ad4ac6e920623df004b207a0ec2bbceffc [file] [log] [blame]
Aneesh Kumar K.V72c12532013-10-07 22:17:57 +05301
2#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _TRACE_KVM_PR_H
4
5#include <linux/tracepoint.h>
Suresh E. Warrier3c78f782014-12-03 18:48:10 -06006#include "trace_book3s.h"
Aneesh Kumar K.V72c12532013-10-07 22:17:57 +05307
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM kvm_pr
Aneesh Kumar K.V72c12532013-10-07 22:17:57 +053010
/*
 * Fired when the PR-KVM exit handler hands control back toward the
 * guest: records the handler's resume code @r and the guest PC that
 * will be resumed.
 */
TRACE_EVENT(kvm_book3s_reenter,
	TP_PROTO(int r, struct kvm_vcpu *vcpu),
	TP_ARGS(r, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	r		)
		__field(	unsigned long,	pc		)
	),

	TP_fast_assign(
		__entry->r = r;
		__entry->pc = kvmppc_get_pc(vcpu);
	),

	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);
27
#ifdef CONFIG_PPC_BOOK3S_64

/*
 * Traces insertion of a host hash-page-table entry for a guest mapping
 * (64-bit Book3S only).  flag_w/flag_x render the HPTE protection bits
 * as characters: 'w' unless the PP field equals 3 (write denied), 'x'
 * unless the no-execute bit HPTE_R_N is set; '-' means denied.
 */
TRACE_EVENT(kvm_book3s_64_mmu_map,
	TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
		 struct kvmppc_pte *orig_pte),
	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

	TP_STRUCT__entry(
		__field(	unsigned char,		flag_w	)
		__field(	unsigned char,		flag_x	)
		__field(	unsigned long,		eaddr	)
		__field(	unsigned long,		hpteg	)
		__field(	unsigned long,		va	)
		__field(	unsigned long long,	vpage	)
		__field(	unsigned long,		hpaddr	)
	),

	TP_fast_assign(
		__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
		__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
		__entry->eaddr = orig_pte->eaddr;
		__entry->hpteg = hpteg;
		__entry->va = va;
		__entry->vpage = orig_pte->vpage;
		__entry->hpaddr = hpaddr;
	),

	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */
61
/*
 * Traces caching of a shadow PTE (hpte_cache).  The flags nibble packs
 * the guest PTE permissions: 0x4 = may_read, 0x2 = may_write,
 * 0x1 = may_execute.
 */
TRACE_EVENT(kvm_book3s_mmu_map,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field(	u64,	host_vpn	)
		__field(	u64,	pfn		)
		__field(	ulong,	eaddr		)
		__field(	u64,	vpage		)
		__field(	ulong,	raddr		)
		__field(	int,	flags		)
	),

	TP_fast_assign(
		__entry->host_vpn = pte->host_vpn;
		__entry->pfn = pte->pfn;
		__entry->eaddr = pte->pte.eaddr;
		__entry->vpage = pte->pte.vpage;
		__entry->raddr = pte->pte.raddr;
		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
				 (pte->pte.may_write ? 0x2 : 0) |
				 (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);
90
/*
 * Traces invalidation of a cached shadow PTE.  Same payload layout as
 * kvm_book3s_mmu_map (flags: 0x4 = may_read, 0x2 = may_write,
 * 0x1 = may_execute) so map/flush pairs are easy to correlate.
 */
TRACE_EVENT(kvm_book3s_mmu_invalidate,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field(	u64,	host_vpn	)
		__field(	u64,	pfn		)
		__field(	ulong,	eaddr		)
		__field(	u64,	vpage		)
		__field(	ulong,	raddr		)
		__field(	int,	flags		)
	),

	TP_fast_assign(
		__entry->host_vpn = pte->host_vpn;
		__entry->pfn = pte->pfn;
		__entry->eaddr = pte->pte.eaddr;
		__entry->vpage = pte->pte.vpage;
		__entry->raddr = pte->pte.raddr;
		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
				 (pte->pte.may_write ? 0x2 : 0) |
				 (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);
119
/*
 * Traces a bulk shadow-MMU flush.  @type is a constant string naming
 * the kind of PTEs flushed, @p1/@p2 bound the flushed range, and count
 * snapshots the vcpu's current hpte_cache_count.  NOTE(review): only
 * the pointer to @type is stored; callers are expected to pass a
 * string with static lifetime.
 */
TRACE_EVENT(kvm_book3s_mmu_flush,
	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
		 unsigned long long p2),
	TP_ARGS(type, vcpu, p1, p2),

	TP_STRUCT__entry(
		__field(	int,			count	)
		__field(	unsigned long long,	p1	)
		__field(	unsigned long long,	p2	)
		__field(	const char *,		type	)
	),

	TP_fast_assign(
		__entry->count = to_book3s(vcpu)->hpte_cache_count;
		__entry->p1 = p1;
		__entry->p2 = p2;
		__entry->type = type;
	),

	TP_printk("Flush %d %sPTEs: %llx - %llx",
		  __entry->count, __entry->type, __entry->p1, __entry->p2)
);
142
/*
 * Traces a hit in the guest->host VSID map: guest VSID resolved to an
 * existing host VSID.
 */
TRACE_EVENT(kvm_book3s_slb_found,
	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
	TP_ARGS(gvsid, hvsid),

	TP_STRUCT__entry(
		__field(	unsigned long long,	gvsid	)
		__field(	unsigned long long,	hvsid	)
	),

	TP_fast_assign(
		__entry->gvsid = gvsid;
		__entry->hvsid = hvsid;
	),

	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);
159
/*
 * Traces a miss in the guest->host VSID map for @gvsid.  The printk
 * shows both the probed slot and its mirror (SID_MAP_MASK -
 * sid_map_mask), the two locations a mapping may occupy.
 */
TRACE_EVENT(kvm_book3s_slb_fail,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
	TP_ARGS(sid_map_mask, gvsid),

	TP_STRUCT__entry(
		__field(	unsigned short,		sid_map_mask	)
		__field(	unsigned long long,	gvsid		)
	),

	TP_fast_assign(
		__entry->sid_map_mask = sid_map_mask;
		__entry->gvsid = gvsid;
	),

	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);
177
/*
 * Traces installation of a new guest-VSID -> host-VSID mapping at map
 * slot @sid_map_mask.
 */
TRACE_EVENT(kvm_book3s_slb_map,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
		 unsigned long long hvsid),
	TP_ARGS(sid_map_mask, gvsid, hvsid),

	TP_STRUCT__entry(
		__field(	unsigned short,		sid_map_mask	)
		__field(	unsigned long long,	guest_vsid	)
		__field(	unsigned long long,	host_vsid	)
	),

	TP_fast_assign(
		__entry->sid_map_mask = sid_map_mask;
		__entry->guest_vsid = gvsid;
		__entry->host_vsid = hvsid;
	),

	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
		  __entry->guest_vsid, __entry->host_vsid)
);
198
/*
 * Traces an SLB entry write (slbmte) with the raw VSID and ESID
 * register values.
 */
TRACE_EVENT(kvm_book3s_slbmte,
	TP_PROTO(u64 slb_vsid, u64 slb_esid),
	TP_ARGS(slb_vsid, slb_esid),

	TP_STRUCT__entry(
		__field(	u64,	slb_vsid	)
		__field(	u64,	slb_esid	)
	),

	TP_fast_assign(
		__entry->slb_vsid = slb_vsid;
		__entry->slb_esid = slb_esid;
	),

	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);
215
/*
 * Traces a guest exit: exit reason (printed symbolically via
 * kvm_trace_symbol_exit), guest PC, guest MSR, the faulting data
 * address, the shadow SRR1, and the last guest instruction recorded in
 * vcpu->arch.last_inst.
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
	TP_ARGS(exit_nr, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_nr		)
		__field(	unsigned long,	pc		)
		__field(	unsigned long,	msr		)
		__field(	unsigned long,	dar		)
		__field(	unsigned long,	srr1		)
		__field(	unsigned long,	last_inst	)
	),

	TP_fast_assign(
		__entry->exit_nr	= exit_nr;
		__entry->pc		= kvmppc_get_pc(vcpu);
		__entry->dar		= kvmppc_get_fault_dar(vcpu);
		__entry->msr		= kvmppc_get_msr(vcpu);
		__entry->srr1		= vcpu->arch.shadow_srr1;
		__entry->last_inst	= vcpu->arch.last_inst;
	),

	TP_printk("exit=%s"
		" | pc=0x%lx"
		" | msr=0x%lx"
		" | dar=0x%lx"
		" | srr1=0x%lx"
		" | last_inst=0x%lx"
		,
		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
		__entry->pc,
		__entry->msr,
		__entry->dar,
		__entry->srr1,
		__entry->last_inst
		)
);
253
254TRACE_EVENT(kvm_unmap_hva,
255 TP_PROTO(unsigned long hva),
256 TP_ARGS(hva),
257
258 TP_STRUCT__entry(
259 __field( unsigned long, hva )
260 ),
261
262 TP_fast_assign(
263 __entry->hva = hva;
264 ),
265
266 TP_printk("unmap hva 0x%lx\n", __entry->hva)
267);
268
#endif /* _TRACE_KVM_PR_H */

/* This part must be outside protection */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE

#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr

#include <trace/define_trace.h>