#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
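/* This header is re-read by <trace/define_trace.h> with
 * TRACE_HEADER_MULTI_READ defined, hence the guard above.
 */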

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace

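/* Logged when a buffer object is created; records the BO pointer and its
 * backing size in pages (taken from the embedded TTM BO).
 */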
TRACE_EVENT(amdgpu_bo_create,
    TP_PROTO(struct amdgpu_bo *bo),
    TP_ARGS(bo),
    TP_STRUCT__entry(
        __field(struct amdgpu_bo *, bo)
        __field(u32, pages)
    ),

    TP_fast_assign(
        __entry->bo = bo;
        __entry->pages = bo->tbo.num_pages;
    ),
    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);

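/* Emitted per IB of a command submission; records the submission's BO list,
 * the target ring, the IB size in dwords, and the fence count reported by
 * amdgpu_fence_count_emitted() for that ring.
 */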
TRACE_EVENT(amdgpu_cs,
    TP_PROTO(struct amdgpu_cs_parser *p, int i),
    TP_ARGS(p, i),
    TP_STRUCT__entry(
        __field(struct amdgpu_bo_list *, bo_list)
        __field(u32, ring)
        __field(u32, dw)
        __field(u32, fences)
    ),

    TP_fast_assign(
        __entry->bo_list = p->bo_list;
        __entry->ring = p->ibs[i].ring->idx;
        __entry->dw = p->ibs[i].length_dw;
        __entry->fences = amdgpu_fence_count_emitted(p->ibs[i].ring);
    ),
    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
              __entry->bo_list, __entry->ring, __entry->dw,
              __entry->fences)
);

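/* Logged when a VMID is grabbed for a ring; records the VMID and ring index. */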
TRACE_EVENT(amdgpu_vm_grab_id,
    TP_PROTO(unsigned vmid, int ring),
    TP_ARGS(vmid, ring),
    TP_STRUCT__entry(
        __field(u32, vmid)
        __field(u32, ring)
    ),

    TP_fast_assign(
        __entry->vmid = vmid;
        __entry->ring = ring;
    ),
    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);

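/* Logged when a mapping is added to a BO's VM address range; start and last
 * come from the mapping's interval-tree node, offset and flags from the
 * mapping itself.
 */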
TRACE_EVENT(amdgpu_vm_bo_map,
    TP_PROTO(struct amdgpu_bo_va *bo_va,
             struct amdgpu_bo_va_mapping *mapping),
    TP_ARGS(bo_va, mapping),
    TP_STRUCT__entry(
        __field(struct amdgpu_bo *, bo)
        __field(long, start)
        __field(long, last)
        __field(u64, offset)
        __field(u32, flags)
    ),

    TP_fast_assign(
        __entry->bo = bo_va->bo;
        __entry->start = mapping->it.start;
        __entry->last = mapping->it.last;
        __entry->offset = mapping->offset;
        __entry->flags = mapping->flags;
    ),
    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
              __entry->bo, __entry->start, __entry->last,
              __entry->offset, __entry->flags)
);

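/* Counterpart of amdgpu_vm_bo_map, logged when a mapping is removed; records
 * the same BO, range, offset and flags.
 */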
TRACE_EVENT(amdgpu_vm_bo_unmap,
    TP_PROTO(struct amdgpu_bo_va *bo_va,
             struct amdgpu_bo_va_mapping *mapping),
    TP_ARGS(bo_va, mapping),
    TP_STRUCT__entry(
        __field(struct amdgpu_bo *, bo)
        __field(long, start)
        __field(long, last)
        __field(u64, offset)
        __field(u32, flags)
    ),

    TP_fast_assign(
        __entry->bo = bo_va->bo;
        __entry->start = mapping->it.start;
        __entry->last = mapping->it.last;
        __entry->offset = mapping->offset;
        __entry->flags = mapping->flags;
    ),
    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
              __entry->bo, __entry->start, __entry->last,
              __entry->offset, __entry->flags)
);

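/* Logged when the page tables for a mapping are updated; records the start
 * offset, the exclusive end offset and the mapping flags.
 */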
TRACE_EVENT(amdgpu_vm_bo_update,
    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
    TP_ARGS(mapping),
    TP_STRUCT__entry(
        __field(u64, soffset)
        __field(u64, eoffset)
        __field(u32, flags)
    ),

    TP_fast_assign(
        __entry->soffset = mapping->it.start;
        __entry->eoffset = mapping->it.last + 1;
        __entry->flags = mapping->flags;
    ),
    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
              __entry->soffset, __entry->eoffset, __entry->flags)
);

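/* Logged for GPU page-table writes: the page-entry (pe) address, the first
 * destination address, the number of entries, the increment applied between
 * consecutive entries, and the page flags.
 */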
TRACE_EVENT(amdgpu_vm_set_page,
    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
             uint32_t incr, uint32_t flags),
    TP_ARGS(pe, addr, count, incr, flags),
    TP_STRUCT__entry(
        __field(u64, pe)
        __field(u64, addr)
        __field(u32, count)
        __field(u32, incr)
        __field(u32, flags)
    ),

    TP_fast_assign(
        __entry->pe = pe;
        __entry->addr = addr;
        __entry->count = count;
        __entry->incr = incr;
        __entry->flags = flags;
    ),
    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
              __entry->pe, __entry->addr, __entry->incr,
              __entry->flags, __entry->count)
);

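/* Logged when a VM flush is issued on a ring; records the page directory
 * address, the ring index and the VMID being flushed.
 */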
TRACE_EVENT(amdgpu_vm_flush,
    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
    TP_ARGS(pd_addr, ring, id),
    TP_STRUCT__entry(
        __field(u64, pd_addr)
        __field(u32, ring)
        __field(u32, id)
    ),

    TP_fast_assign(
        __entry->pd_addr = pd_addr;
        __entry->ring = ring;
        __entry->id = id;
    ),
    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
              __entry->pd_addr, __entry->ring, __entry->id)
);

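/* Logged for each buffer object placed into a BO list; records the list and
 * BO pointers.
 */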
TRACE_EVENT(amdgpu_bo_list_set,
    TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
    TP_ARGS(list, bo),
    TP_STRUCT__entry(
        __field(struct amdgpu_bo_list *, list)
        __field(struct amdgpu_bo *, bo)
    ),

    TP_fast_assign(
        __entry->list = list;
        __entry->bo = bo;
    ),
    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
);

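/* Event class shared by the fence emit/wait tracepoints below; records the
 * DRM device (primary minor) index, the ring and the fence sequence number.
 */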
DECLARE_EVENT_CLASS(amdgpu_fence_request,

    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

    TP_ARGS(dev, ring, seqno),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(int, ring)
        __field(u32, seqno)
    ),

    TP_fast_assign(
        __entry->dev = dev->primary->index;
        __entry->ring = ring;
        __entry->seqno = seqno;
    ),

    TP_printk("dev=%u, ring=%d, seqno=%u",
              __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,

    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

    TP_ARGS(dev, ring, seqno)
);

DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,

    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

    TP_ARGS(dev, ring, seqno)
);

DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,

    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

    TP_ARGS(dev, ring, seqno)
);

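/* Event class shared by the semaphore signal/wait tracepoints; records the
 * ring, the semaphore's waiter count and its GPU address.
 */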
DECLARE_EVENT_CLASS(amdgpu_semaphore_request,

    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

    TP_ARGS(ring, sem),

    TP_STRUCT__entry(
        __field(int, ring)
        __field(signed, waiters)
        __field(uint64_t, gpu_addr)
    ),

    TP_fast_assign(
        __entry->ring = ring;
        __entry->waiters = sem->waiters;
        __entry->gpu_addr = sem->gpu_addr;
    ),

    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
              __entry->waiters, __entry->gpu_addr)
);

DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,

    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

    TP_ARGS(ring, sem)
);

DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,

    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

    TP_ARGS(ring, sem)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>