blob: 8b22e4748344c498d95415610c30b55a6af458fa [file] [log] [blame]
Aneesh Kumar K.V72c12532013-10-07 22:17:57 +05301
2#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _TRACE_KVM_PR_H
4
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvm_pr
9#define TRACE_INCLUDE_PATH .
10#define TRACE_INCLUDE_FILE trace_pr
11
/*
 * { vector, name } pairs mapping Book3S interrupt vector numbers to
 * human-readable exit names, in the format expected by
 * __print_symbolic() in the kvm_exit tracepoint below.
 */
#define kvm_trace_symbol_exit					\
	{0x100, "SYSTEM_RESET"},				\
	{0x200, "MACHINE_CHECK"},				\
	{0x300, "DATA_STORAGE"},				\
	{0x380, "DATA_SEGMENT"},				\
	{0x400, "INST_STORAGE"},				\
	{0x480, "INST_SEGMENT"},				\
	{0x500, "EXTERNAL"},					\
	{0x501, "EXTERNAL_LEVEL"},				\
	{0x502, "EXTERNAL_HV"},					\
	{0x600, "ALIGNMENT"},					\
	{0x700, "PROGRAM"},					\
	{0x800, "FP_UNAVAIL"},					\
	{0x900, "DECREMENTER"},					\
	{0x980, "HV_DECREMENTER"},				\
	{0xc00, "SYSCALL"},					\
	{0xd00, "TRACE"},					\
	{0xe00, "H_DATA_STORAGE"},				\
	{0xe20, "H_INST_STORAGE"},				\
	{0xe40, "H_EMUL_ASSIST"},				\
	{0xf00, "PERFMON"},					\
	{0xf20, "ALTIVEC"},					\
	{0xf40, "VSX"}
35
36TRACE_EVENT(kvm_book3s_reenter,
37 TP_PROTO(int r, struct kvm_vcpu *vcpu),
38 TP_ARGS(r, vcpu),
39
40 TP_STRUCT__entry(
41 __field( unsigned int, r )
42 __field( unsigned long, pc )
43 ),
44
45 TP_fast_assign(
46 __entry->r = r;
47 __entry->pc = kvmppc_get_pc(vcpu);
48 ),
49
50 TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
51);
52
#ifdef CONFIG_PPC_BOOK3S_64

/*
 * Records insertion of a host hash-page-table entry backing a guest
 * mapping.  flag_w is '-' when (rflags & HPTE_R_PP) == 3 (no write
 * permission), else 'w'; flag_x is '-' when HPTE_R_N (no-execute) is
 * set, else 'x'.
 */
TRACE_EVENT(kvm_book3s_64_mmu_map,
	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
		 struct kvmppc_pte *orig_pte),
	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

	TP_STRUCT__entry(
		__field(unsigned char, flag_w)
		__field(unsigned char, flag_x)
		__field(unsigned long, eaddr)
		__field(unsigned long, hpteg)
		__field(unsigned long, va)
		__field(unsigned long long, vpage)
		__field(unsigned long, hpaddr)
	),

	TP_fast_assign(
		__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
		__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
		__entry->eaddr = orig_pte->eaddr;
		__entry->hpteg = hpteg;
		__entry->va = va;
		__entry->vpage = orig_pte->vpage;
		__entry->hpaddr = hpaddr;
	),

	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */
86
87TRACE_EVENT(kvm_book3s_mmu_map,
88 TP_PROTO(struct hpte_cache *pte),
89 TP_ARGS(pte),
90
91 TP_STRUCT__entry(
92 __field( u64, host_vpn )
93 __field( u64, pfn )
94 __field( ulong, eaddr )
95 __field( u64, vpage )
96 __field( ulong, raddr )
97 __field( int, flags )
98 ),
99
100 TP_fast_assign(
101 __entry->host_vpn = pte->host_vpn;
102 __entry->pfn = pte->pfn;
103 __entry->eaddr = pte->pte.eaddr;
104 __entry->vpage = pte->pte.vpage;
105 __entry->raddr = pte->pte.raddr;
106 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
107 (pte->pte.may_write ? 0x2 : 0) |
108 (pte->pte.may_execute ? 0x1 : 0);
109 ),
110
111 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
112 __entry->host_vpn, __entry->pfn, __entry->eaddr,
113 __entry->vpage, __entry->raddr, __entry->flags)
114);
115
116TRACE_EVENT(kvm_book3s_mmu_invalidate,
117 TP_PROTO(struct hpte_cache *pte),
118 TP_ARGS(pte),
119
120 TP_STRUCT__entry(
121 __field( u64, host_vpn )
122 __field( u64, pfn )
123 __field( ulong, eaddr )
124 __field( u64, vpage )
125 __field( ulong, raddr )
126 __field( int, flags )
127 ),
128
129 TP_fast_assign(
130 __entry->host_vpn = pte->host_vpn;
131 __entry->pfn = pte->pfn;
132 __entry->eaddr = pte->pte.eaddr;
133 __entry->vpage = pte->pte.vpage;
134 __entry->raddr = pte->pte.raddr;
135 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
136 (pte->pte.may_write ? 0x2 : 0) |
137 (pte->pte.may_execute ? 0x1 : 0);
138 ),
139
140 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
141 __entry->host_vpn, __entry->pfn, __entry->eaddr,
142 __entry->vpage, __entry->raddr, __entry->flags)
143);
144
145TRACE_EVENT(kvm_book3s_mmu_flush,
146 TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
147 unsigned long long p2),
148 TP_ARGS(type, vcpu, p1, p2),
149
150 TP_STRUCT__entry(
151 __field( int, count )
152 __field( unsigned long long, p1 )
153 __field( unsigned long long, p2 )
154 __field( const char *, type )
155 ),
156
157 TP_fast_assign(
158 __entry->count = to_book3s(vcpu)->hpte_cache_count;
159 __entry->p1 = p1;
160 __entry->p2 = p2;
161 __entry->type = type;
162 ),
163
164 TP_printk("Flush %d %sPTEs: %llx - %llx",
165 __entry->count, __entry->type, __entry->p1, __entry->p2)
166);
167
168TRACE_EVENT(kvm_book3s_slb_found,
169 TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
170 TP_ARGS(gvsid, hvsid),
171
172 TP_STRUCT__entry(
173 __field( unsigned long long, gvsid )
174 __field( unsigned long long, hvsid )
175 ),
176
177 TP_fast_assign(
178 __entry->gvsid = gvsid;
179 __entry->hvsid = hvsid;
180 ),
181
182 TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
183);
184
185TRACE_EVENT(kvm_book3s_slb_fail,
186 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
187 TP_ARGS(sid_map_mask, gvsid),
188
189 TP_STRUCT__entry(
190 __field( unsigned short, sid_map_mask )
191 __field( unsigned long long, gvsid )
192 ),
193
194 TP_fast_assign(
195 __entry->sid_map_mask = sid_map_mask;
196 __entry->gvsid = gvsid;
197 ),
198
199 TP_printk("%x/%x: %llx", __entry->sid_map_mask,
200 SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
201);
202
203TRACE_EVENT(kvm_book3s_slb_map,
204 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
205 unsigned long long hvsid),
206 TP_ARGS(sid_map_mask, gvsid, hvsid),
207
208 TP_STRUCT__entry(
209 __field( unsigned short, sid_map_mask )
210 __field( unsigned long long, guest_vsid )
211 __field( unsigned long long, host_vsid )
212 ),
213
214 TP_fast_assign(
215 __entry->sid_map_mask = sid_map_mask;
216 __entry->guest_vsid = gvsid;
217 __entry->host_vsid = hvsid;
218 ),
219
220 TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
221 __entry->guest_vsid, __entry->host_vsid)
222);
223
224TRACE_EVENT(kvm_book3s_slbmte,
225 TP_PROTO(u64 slb_vsid, u64 slb_esid),
226 TP_ARGS(slb_vsid, slb_esid),
227
228 TP_STRUCT__entry(
229 __field( u64, slb_vsid )
230 __field( u64, slb_esid )
231 ),
232
233 TP_fast_assign(
234 __entry->slb_vsid = slb_vsid;
235 __entry->slb_esid = slb_esid;
236 ),
237
238 TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
239);
240
241TRACE_EVENT(kvm_exit,
242 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
243 TP_ARGS(exit_nr, vcpu),
244
245 TP_STRUCT__entry(
246 __field( unsigned int, exit_nr )
247 __field( unsigned long, pc )
248 __field( unsigned long, msr )
249 __field( unsigned long, dar )
250 __field( unsigned long, srr1 )
251 __field( unsigned long, last_inst )
252 ),
253
254 TP_fast_assign(
255 __entry->exit_nr = exit_nr;
256 __entry->pc = kvmppc_get_pc(vcpu);
257 __entry->dar = kvmppc_get_fault_dar(vcpu);
258 __entry->msr = vcpu->arch.shared->msr;
259 __entry->srr1 = vcpu->arch.shadow_srr1;
260 __entry->last_inst = vcpu->arch.last_inst;
261 ),
262
263 TP_printk("exit=%s"
264 " | pc=0x%lx"
265 " | msr=0x%lx"
266 " | dar=0x%lx"
267 " | srr1=0x%lx"
268 " | last_inst=0x%lx"
269 ,
270 __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
271 __entry->pc,
272 __entry->msr,
273 __entry->dar,
274 __entry->srr1,
275 __entry->last_inst
276 )
277);
278
279TRACE_EVENT(kvm_unmap_hva,
280 TP_PROTO(unsigned long hva),
281 TP_ARGS(hva),
282
283 TP_STRUCT__entry(
284 __field( unsigned long, hva )
285 ),
286
287 TP_fast_assign(
288 __entry->hva = hva;
289 ),
290
291 TP_printk("unmap hva 0x%lx\n", __entry->hva)
292);
293
#endif /* _TRACE_KVM_PR_H */
295
296/* This part must be outside protection */
297#include <trace/define_trace.h>