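/*
 * Tracepoints for the Book3S PR (problem-state) flavour of KVM on PowerPC,
 * emitted under the "kvm_pr" trace system defined below.
 */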

#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_PR_H

#include <linux/tracepoint.h>
#include "trace_book3s.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr

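/* Logged when re-entering the guest; records the handler return code and guest PC. */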
TRACE_EVENT(kvm_book3s_reenter,
	TP_PROTO(int r, struct kvm_vcpu *vcpu),
	TP_ARGS(r, vcpu),

	TP_STRUCT__entry(
		__field( unsigned int, r )
		__field( unsigned long, pc )
	),

	TP_fast_assign(
		__entry->r = r;
		__entry->pc = kvmppc_get_pc(vcpu);
	),

	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);

#ifdef CONFIG_PPC_BOOK3S_64

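/*
 * Traces insertion of a host HPTE for a guest mapping on Book3S-64:
 * write/execute permission flags, guest effective address, HPTE group,
 * virtual address and the host physical address being mapped.
 */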
TRACE_EVENT(kvm_book3s_64_mmu_map,
	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
		 struct kvmppc_pte *orig_pte),
	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

	TP_STRUCT__entry(
		__field( unsigned char, flag_w )
		__field( unsigned char, flag_x )
		__field( unsigned long, eaddr )
		__field( unsigned long, hpteg )
		__field( unsigned long, va )
		__field( unsigned long long, vpage )
		__field( unsigned long, hpaddr )
	),

	TP_fast_assign(
		__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
		__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
		__entry->eaddr = orig_pte->eaddr;
		__entry->hpteg = hpteg;
		__entry->va = va;
		__entry->vpage = orig_pte->vpage;
		__entry->hpaddr = hpaddr;
	),

	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */

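/*
 * Traces an hpte_cache entry being mapped: host VPN and PFN, guest
 * effective/virtual/real addresses, and an RWX flag nibble.
 */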
TRACE_EVENT(kvm_book3s_mmu_map,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field( u64, host_vpn )
		__field( u64, pfn )
		__field( ulong, eaddr )
		__field( u64, vpage )
		__field( ulong, raddr )
		__field( int, flags )
	),

	TP_fast_assign(
		__entry->host_vpn = pte->host_vpn;
		__entry->pfn = pte->pfn;
		__entry->eaddr = pte->pte.eaddr;
		__entry->vpage = pte->pte.vpage;
		__entry->raddr = pte->pte.raddr;
		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
				 (pte->pte.may_write ? 0x2 : 0) |
				 (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);

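/* Same fields as kvm_book3s_mmu_map, logged when the cached entry is invalidated. */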
TRACE_EVENT(kvm_book3s_mmu_invalidate,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field( u64, host_vpn )
		__field( u64, pfn )
		__field( ulong, eaddr )
		__field( u64, vpage )
		__field( ulong, raddr )
		__field( int, flags )
	),

	TP_fast_assign(
		__entry->host_vpn = pte->host_vpn;
		__entry->pfn = pte->pfn;
		__entry->eaddr = pte->pte.eaddr;
		__entry->vpage = pte->pte.vpage;
		__entry->raddr = pte->pte.raddr;
		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
				 (pte->pte.may_write ? 0x2 : 0) |
				 (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);

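/*
 * Traces a bulk shadow-PTE flush: the current hpte_cache_count, a string
 * describing the flush type, and two caller-supplied parameters printed
 * as a range.
 */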
TRACE_EVENT(kvm_book3s_mmu_flush,
	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
		 unsigned long long p2),
	TP_ARGS(type, vcpu, p1, p2),

	TP_STRUCT__entry(
		__field( int, count )
		__field( unsigned long long, p1 )
		__field( unsigned long long, p2 )
		__field( const char *, type )
	),

	TP_fast_assign(
		__entry->count = to_book3s(vcpu)->hpte_cache_count;
		__entry->p1 = p1;
		__entry->p2 = p2;
		__entry->type = type;
	),

	TP_printk("Flush %d %sPTEs: %llx - %llx",
		  __entry->count, __entry->type, __entry->p1, __entry->p2)
);

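/* Guest VSID found in the SID map; logs the guest -> host VSID translation. */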
TRACE_EVENT(kvm_book3s_slb_found,
	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
	TP_ARGS(gvsid, hvsid),

	TP_STRUCT__entry(
		__field( unsigned long long, gvsid )
		__field( unsigned long long, hvsid )
	),

	TP_fast_assign(
		__entry->gvsid = gvsid;
		__entry->hvsid = hvsid;
	),

	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);

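/* Logged when no host VSID is found for a guest VSID; dumps the SID map index and the guest VSID. */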
TRACE_EVENT(kvm_book3s_slb_fail,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
	TP_ARGS(sid_map_mask, gvsid),

	TP_STRUCT__entry(
		__field( unsigned short, sid_map_mask )
		__field( unsigned long long, gvsid )
	),

	TP_fast_assign(
		__entry->sid_map_mask = sid_map_mask;
		__entry->gvsid = gvsid;
	),

	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);

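/* Records a new guest VSID -> host VSID entry placed in the SID map. */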
TRACE_EVENT(kvm_book3s_slb_map,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
		 unsigned long long hvsid),
	TP_ARGS(sid_map_mask, gvsid, hvsid),

	TP_STRUCT__entry(
		__field( unsigned short, sid_map_mask )
		__field( unsigned long long, guest_vsid )
		__field( unsigned long long, host_vsid )
	),

	TP_fast_assign(
		__entry->sid_map_mask = sid_map_mask;
		__entry->guest_vsid = gvsid;
		__entry->host_vsid = hvsid;
	),

	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
		  __entry->guest_vsid, __entry->host_vsid)
);

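/* Traces the VSID/ESID pair written into a shadow SLB entry. */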
TRACE_EVENT(kvm_book3s_slbmte,
	TP_PROTO(u64 slb_vsid, u64 slb_esid),
	TP_ARGS(slb_vsid, slb_esid),

	TP_STRUCT__entry(
		__field( u64, slb_vsid )
		__field( u64, slb_esid )
	),

	TP_fast_assign(
		__entry->slb_vsid = slb_vsid;
		__entry->slb_esid = slb_esid;
	),

	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);

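/*
 * Logged on every guest exit: the exit reason (printed symbolically),
 * guest PC, MSR, fault DAR, shadow SRR1 and the last guest instruction.
 */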
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
	TP_ARGS(exit_nr, vcpu),

	TP_STRUCT__entry(
		__field( unsigned int, exit_nr )
		__field( unsigned long, pc )
		__field( unsigned long, msr )
		__field( unsigned long, dar )
		__field( unsigned long, srr1 )
		__field( unsigned long, last_inst )
	),

	TP_fast_assign(
		__entry->exit_nr = exit_nr;
		__entry->pc = kvmppc_get_pc(vcpu);
		__entry->dar = kvmppc_get_fault_dar(vcpu);
		__entry->msr = kvmppc_get_msr(vcpu);
		__entry->srr1 = vcpu->arch.shadow_srr1;
		__entry->last_inst = vcpu->arch.last_inst;
	),

	TP_printk("exit=%s"
		" | pc=0x%lx"
		" | msr=0x%lx"
		" | dar=0x%lx"
		" | srr1=0x%lx"
		" | last_inst=0x%lx"
		,
		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
		__entry->pc,
		__entry->msr,
		__entry->dar,
		__entry->srr1,
		__entry->last_inst
		)
);

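/* Traces unmapping of a host virtual address backing guest memory. */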
TRACE_EVENT(kvm_unmap_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field( unsigned long, hva )
	),

	TP_fast_assign(
		__entry->hva = hva;
	),

	TP_printk("unmap hva 0x%lx", __entry->hva)
);

#endif /* _TRACE_KVM_PR_H */

/* This part must be outside protection */
#include <trace/define_trace.h>