Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1 | #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) |
| 2 | #define _AMDGPU_TRACE_H_ |
| 3 | |
| 4 | #include <linux/stringify.h> |
| 5 | #include <linux/types.h> |
| 6 | #include <linux/tracepoint.h> |
| 7 | |
| 8 | #include <drm/drmP.h> |
| 9 | |
| 10 | #undef TRACE_SYSTEM |
| 11 | #define TRACE_SYSTEM amdgpu |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 12 | #define TRACE_INCLUDE_FILE amdgpu_trace |
| 13 | |
/*
 * amdgpu_bo_create - traced when a buffer object is created.
 * Records the BO pointer (as an identifier for correlating later
 * events) and its size in pages, read from bo->tbo.num_pages.
 */
TRACE_EVENT(amdgpu_bo_create,
	    TP_PROTO(struct amdgpu_bo *bo),
	    TP_ARGS(bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u32, pages)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->pages = bo->tbo.num_pages;
			   ),
	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
| 28 | |
/*
 * amdgpu_cs - traced per indirect buffer (IB) during command submission.
 * @p: the command-stream parser holding the IB array.
 * @i: index of the IB within p->ibs[] being submitted.
 *
 * Records the target ring index, the IB length in dwords, and how many
 * fences that ring has emitted but not yet retired at submit time
 * (via amdgpu_fence_count_emitted), i.e. a measure of ring backlog.
 */
TRACE_EVENT(amdgpu_cs,
	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
	    TP_ARGS(p, i),
	    TP_STRUCT__entry(
			     __field(u32, ring)
			     __field(u32, dw)
			     __field(u32, fences)
			     ),

	    TP_fast_assign(
			   __entry->ring = p->ibs[i].ring->idx;
			   __entry->dw = p->ibs[i].length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(
				   p->ibs[i].ring);
			   ),
	    TP_printk("ring=%u, dw=%u, fences=%u",
		      __entry->ring, __entry->dw,
		      __entry->fences)
);
| 48 | |
/*
 * amdgpu_vm_grab_id - traced when a VM ID is assigned for use on a ring.
 * Records the granted VMID and the ring index it was grabbed for.
 */
TRACE_EVENT(amdgpu_vm_grab_id,
	    TP_PROTO(unsigned vmid, int ring),
	    TP_ARGS(vmid, ring),
	    TP_STRUCT__entry(
			     __field(u32, vmid)
			     __field(u32, ring)
			     ),

	    TP_fast_assign(
			   __entry->vmid = vmid;
			   __entry->ring = ring;
			   ),
	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);
| 63 | |
/*
 * amdgpu_vm_bo_map - traced when a GPU VM mapping is created for a BO.
 * Records the backing BO, the mapping's address range taken from the
 * interval-tree node (it.start / it.last, inclusive), the offset into
 * the BO, and the mapping flags.
 *
 * NOTE(review): start/last are stored as 'long' but printed with %lx,
 * which expects unsigned long; the interval tree presumably holds
 * unsigned page addresses — confirm and consider u64 fields.
 */
TRACE_EVENT(amdgpu_vm_bo_map,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va->bo;
			   __entry->start = mapping->it.start;
			   __entry->last = mapping->it.last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);
| 87 | |
/*
 * amdgpu_vm_bo_unmap - traced when a GPU VM mapping of a BO is torn down.
 * Mirrors amdgpu_vm_bo_map: records the backing BO, the interval-tree
 * range being removed (it.start / it.last, inclusive), the offset into
 * the BO, and the mapping flags.
 *
 * NOTE(review): start/last are 'long' printed with %lx (unsigned long
 * specifier) — same type-vs-format question as amdgpu_vm_bo_map.
 */
TRACE_EVENT(amdgpu_vm_bo_unmap,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va->bo;
			   __entry->start = mapping->it.start;
			   __entry->last = mapping->it.last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);
| 111 | |
/*
 * amdgpu_vm_bo_update - traced when the page tables for a mapping are
 * (re)written. Records the mapping's start offset and its exclusive
 * end offset (it.last is inclusive, hence the +1), plus the flags.
 */
TRACE_EVENT(amdgpu_vm_bo_update,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->soffset = mapping->it.start;
			   /* it.last is inclusive; report a half-open end */
			   __entry->eoffset = mapping->it.last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);
| 129 | |
/*
 * amdgpu_vm_set_page - traced for each batched page-table update.
 * @pe:    GPU address of the first page-table entry being written.
 * @addr:  starting address value written into the entries.
 * @count: number of consecutive entries updated.
 * @incr:  byte increment applied to addr between entries.
 * @flags: page-table entry flags applied to the range.
 */
TRACE_EVENT(amdgpu_vm_set_page,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags),
	    TP_ARGS(pe, addr, count, incr, flags),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
		      __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count)
);
| 153 | |
/*
 * amdgpu_vm_flush - traced when a VM TLB flush is emitted on a ring.
 * Records the page-directory base address being activated, the ring
 * index, and the VM ID being flushed.
 */
TRACE_EVENT(amdgpu_vm_flush,
	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
	    TP_ARGS(pd_addr, ring, id),
	    TP_STRUCT__entry(
			     __field(u64, pd_addr)
			     __field(u32, ring)
			     __field(u32, id)
			     ),

	    TP_fast_assign(
			   __entry->pd_addr = pd_addr;
			   __entry->ring = ring;
			   __entry->id = id;
			   ),
	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
		      __entry->pd_addr, __entry->ring, __entry->id)
);
| 171 | |
/*
 * amdgpu_fence_request - shared event class for fence lifecycle events
 * (emit, wait-begin, wait-end). Records the DRM minor index of the
 * device (dev->primary->index), the ring, and the fence sequence number.
 */
DECLARE_EVENT_CLASS(amdgpu_fence_request,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(int, ring)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   /* identify the device by its primary DRM minor */
			   __entry->dev = dev->primary->index;
			   __entry->ring = ring;
			   __entry->seqno = seqno;
			   ),

	    TP_printk("dev=%u, ring=%d, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);
| 193 | |
/* Fired when a fence is emitted to a ring. */
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno)
);
| 200 | |
/* Fired when a wait on a fence starts. */
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno)
);
| 207 | |
/* Fired when a wait on a fence completes. */
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno)
);
| 214 | |
/*
 * amdgpu_semaphore_request - shared event class for semaphore
 * signal/wait events. Records the ring, the semaphore's current waiter
 * count, and its GPU address.
 *
 * NOTE(review): 'ring' is a signed int but printed with %u — harmless
 * for real ring indices, but %d would match the field type.
 */
DECLARE_EVENT_CLASS(amdgpu_semaphore_request,

	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

	    TP_ARGS(ring, sem),

	    TP_STRUCT__entry(
			     __field(int, ring)
			     __field(signed, waiters)
			     __field(uint64_t, gpu_addr)
			     ),

	    TP_fast_assign(
			   __entry->ring = ring;
			   __entry->waiters = sem->waiters;
			   __entry->gpu_addr = sem->gpu_addr;
			   ),

	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
		      __entry->waiters, __entry->gpu_addr)
);
| 236 | |
/*
 * Fired when a semaphore signal is emitted.
 * NOTE(review): "signale" is a long-standing typo for "signal", but the
 * event name is exposed to userspace tracing tools, so renaming it
 * would break existing consumers — left as-is deliberately.
 */
DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,

	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

	    TP_ARGS(ring, sem)
);
| 243 | |
/* Fired when a semaphore wait is emitted on a ring. */
DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,

	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

	    TP_ARGS(ring, sem)
);
| 250 | |
#endif

/* This part must be outside protection */
/*
 * define_trace.h re-includes this header (with TRACE_HEADER_MULTI_READ
 * set) to expand the TRACE_EVENT macros into the actual tracepoint
 * implementations; TRACE_INCLUDE_PATH tells it where to find us.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>