/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace

#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
	job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)

TRACE_EVENT(amdgpu_device_rreg,
	TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	TP_ARGS(did, reg, value),
	TP_STRUCT__entry(
		__field(unsigned, did)
		__field(uint32_t, reg)
		__field(uint32_t, value)
	),
	TP_fast_assign(
		__entry->did = did;
		__entry->reg = reg;
		__entry->value = value;
	),
	TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
		  (unsigned long)__entry->did,
		  (unsigned long)__entry->reg,
		  (unsigned long)__entry->value)
);

TRACE_EVENT(amdgpu_device_wreg,
	TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	TP_ARGS(did, reg, value),
	TP_STRUCT__entry(
		__field(unsigned, did)
		__field(uint32_t, reg)
		__field(uint32_t, value)
	),
	TP_fast_assign(
		__entry->did = did;
		__entry->reg = reg;
		__entry->value = value;
	),
	TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
		  (unsigned long)__entry->did,
		  (unsigned long)__entry->reg,
		  (unsigned long)__entry->value)
);

TRACE_EVENT(amdgpu_iv,
	TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv),
	TP_ARGS(ih, iv),
	TP_STRUCT__entry(
		__field(unsigned, ih)
		__field(unsigned, client_id)
		__field(unsigned, src_id)
		__field(unsigned, ring_id)
		__field(unsigned, vmid)
		__field(unsigned, vmid_src)
		__field(uint64_t, timestamp)
		__field(unsigned, timestamp_src)
		__field(unsigned, pasid)
		__array(unsigned, src_data, 4)
	),
	TP_fast_assign(
		__entry->ih = ih;
		__entry->client_id = iv->client_id;
		__entry->src_id = iv->src_id;
		__entry->ring_id = iv->ring_id;
		__entry->vmid = iv->vmid;
		__entry->vmid_src = iv->vmid_src;
		__entry->timestamp = iv->timestamp;
		__entry->timestamp_src = iv->timestamp_src;
		__entry->pasid = iv->pasid;
		__entry->src_data[0] = iv->src_data[0];
		__entry->src_data[1] = iv->src_data[1];
		__entry->src_data[2] = iv->src_data[2];
		__entry->src_data[3] = iv->src_data[3];
	),
	TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u "
		  "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
		  __entry->ih, __entry->client_id, __entry->src_id,
		  __entry->ring_id, __entry->vmid,
		  __entry->timestamp, __entry->pasid,
		  __entry->src_data[0], __entry->src_data[1],
		  __entry->src_data[2], __entry->src_data[3])
);


TRACE_EVENT(amdgpu_bo_create,
	TP_PROTO(struct amdgpu_bo *bo),
	TP_ARGS(bo),
	TP_STRUCT__entry(
		__field(struct amdgpu_bo *, bo)
		__field(u32, pages)
		__field(u32, type)
		__field(u32, prefer)
		__field(u32, allow)
		__field(u32, visible)
	),

	TP_fast_assign(
		__entry->bo = bo;
		__entry->pages = PFN_UP(bo->tbo.resource->size);
		__entry->type = bo->tbo.resource->mem_type;
		__entry->prefer = bo->preferred_domains;
		__entry->allow = bo->allowed_domains;
		__entry->visible = bo->flags;
	),

	TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
		  __entry->bo, __entry->pages, __entry->type,
		  __entry->prefer, __entry->allow, __entry->visible)
);

TRACE_EVENT(amdgpu_cs,
	TP_PROTO(struct amdgpu_cs_parser *p,
		 struct amdgpu_job *job,
		 struct amdgpu_ib *ib),
	TP_ARGS(p, job, ib),
	TP_STRUCT__entry(
		__field(struct amdgpu_bo_list *, bo_list)
		__field(u32, ring)
		__field(u32, dw)
		__field(u32, fences)
	),

	TP_fast_assign(
		__entry->bo_list = p->bo_list;
		__entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
		__entry->dw = ib->length_dw;
		__entry->fences = amdgpu_fence_count_emitted(
			to_amdgpu_ring(job->base.entity->rq->sched));
	),
	TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
		  __entry->bo_list, __entry->ring, __entry->dw,
		  __entry->fences)
);

TRACE_EVENT(amdgpu_cs_ioctl,
	TP_PROTO(struct amdgpu_job *job),
	TP_ARGS(job),
	TP_STRUCT__entry(
		__field(uint64_t, sched_job_id)
		__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
		__field(unsigned int, context)
		__field(unsigned int, seqno)
		__field(struct dma_fence *, fence)
		__string(ring, to_amdgpu_ring(job->base.sched)->name)
		__field(u32, num_ibs)
	),

	TP_fast_assign(
		__entry->sched_job_id = job->base.id;
		__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
		__entry->context = job->base.s_fence->finished.context;
		__entry->seqno = job->base.s_fence->finished.seqno;
		__assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
		__entry->num_ibs = job->num_ibs;
	),
	TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
		  __entry->sched_job_id, __get_str(timeline), __entry->context,
		  __entry->seqno, __get_str(ring), __entry->num_ibs)
);

TRACE_EVENT(amdgpu_sched_run_job,
	TP_PROTO(struct amdgpu_job *job),
	TP_ARGS(job),
	TP_STRUCT__entry(
		__field(uint64_t, sched_job_id)
		__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
		__field(unsigned int, context)
		__field(unsigned int, seqno)
		__string(ring, to_amdgpu_ring(job->base.sched)->name)
		__field(u32, num_ibs)
	),

	TP_fast_assign(
		__entry->sched_job_id = job->base.id;
		__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
		__entry->context = job->base.s_fence->finished.context;
		__entry->seqno = job->base.s_fence->finished.seqno;
		__assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
		__entry->num_ibs = job->num_ibs;
	),
	TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
		  __entry->sched_job_id, __get_str(timeline), __entry->context,
		  __entry->seqno, __get_str(ring), __entry->num_ibs)
);


TRACE_EVENT(amdgpu_vm_grab_id,
	TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		 struct amdgpu_job *job),
	TP_ARGS(vm, ring, job),
	TP_STRUCT__entry(
		__field(u32, pasid)
		__string(ring, ring->name)
		__field(u32, ring)
		__field(u32, vmid)
		__field(u32, vm_hub)
		__field(u64, pd_addr)
		__field(u32, needs_flush)
	),

	TP_fast_assign(
		__entry->pasid = vm->pasid;
		__assign_str(ring, ring->name);
		__entry->vmid = job->vmid;
		__entry->vm_hub = ring->vm_hub;
		__entry->pd_addr = job->vm_pd_addr;
		__entry->needs_flush = job->vm_needs_flush;
	),
	TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
		  __entry->pasid, __get_str(ring), __entry->vmid,
		  __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);

TRACE_EVENT(amdgpu_vm_bo_map,
	TP_PROTO(struct amdgpu_bo_va *bo_va,
		 struct amdgpu_bo_va_mapping *mapping),
	TP_ARGS(bo_va, mapping),
	TP_STRUCT__entry(
		__field(struct amdgpu_bo *, bo)
		__field(long, start)
		__field(long, last)
		__field(u64, offset)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->bo = bo_va ? bo_va->base.bo : NULL;
		__entry->start = mapping->start;
		__entry->last = mapping->last;
		__entry->offset = mapping->offset;
		__entry->flags = mapping->flags;
	),
	TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
		  __entry->bo, __entry->start, __entry->last,
		  __entry->offset, __entry->flags)
);

TRACE_EVENT(amdgpu_vm_bo_unmap,
	TP_PROTO(struct amdgpu_bo_va *bo_va,
		 struct amdgpu_bo_va_mapping *mapping),
	TP_ARGS(bo_va, mapping),
	TP_STRUCT__entry(
		__field(struct amdgpu_bo *, bo)
		__field(long, start)
		__field(long, last)
		__field(u64, offset)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->bo = bo_va ? bo_va->base.bo : NULL;
		__entry->start = mapping->start;
		__entry->last = mapping->last;
		__entry->offset = mapping->offset;
		__entry->flags = mapping->flags;
	),
	TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
		  __entry->bo, __entry->start, __entry->last,
		  __entry->offset, __entry->flags)
);

DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	TP_ARGS(mapping),
	TP_STRUCT__entry(
		__field(u64, soffset)
		__field(u64, eoffset)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->soffset = mapping->start;
		__entry->eoffset = mapping->last + 1;
		__entry->flags = mapping->flags;
	),
	TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
		  __entry->soffset, __entry->eoffset, __entry->flags)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
	TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	TP_ARGS(mapping)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
	TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	TP_ARGS(mapping)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
	TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	TP_ARGS(mapping)
);

TRACE_EVENT(amdgpu_vm_update_ptes,
	TP_PROTO(struct amdgpu_vm_update_params *p,
		 uint64_t start, uint64_t end,
		 unsigned int nptes, uint64_t dst,
		 uint64_t incr, uint64_t flags,
		 pid_t pid, uint64_t vm_ctx),
	TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
	TP_STRUCT__entry(
		__field(u64, start)
		__field(u64, end)
		__field(u64, flags)
		__field(unsigned int, nptes)
		__field(u64, incr)
		__field(pid_t, pid)
		__field(u64, vm_ctx)
		__dynamic_array(u64, dst, nptes)
	),

	TP_fast_assign(
		unsigned int i;

		__entry->start = start;
		__entry->end = end;
		__entry->flags = flags;
		__entry->incr = incr;
		__entry->nptes = nptes;
		__entry->pid = pid;
		__entry->vm_ctx = vm_ctx;
		for (i = 0; i < nptes; ++i) {
			u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
				p->pages_addr, dst) : dst;

			((u64 *)__get_dynamic_array(dst))[i] = addr;
			dst += incr;
		}
	),
	TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
		  " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
		  __entry->vm_ctx, __entry->start, __entry->end,
		  __entry->flags, __entry->incr, __print_array(
		  __get_dynamic_array(dst), __entry->nptes, 8))
);

TRACE_EVENT(amdgpu_vm_set_ptes,
	TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		 uint32_t incr, uint64_t flags, bool immediate),
	TP_ARGS(pe, addr, count, incr, flags, immediate),
	TP_STRUCT__entry(
		__field(u64, pe)
		__field(u64, addr)
		__field(u32, count)
		__field(u32, incr)
		__field(u64, flags)
		__field(bool, immediate)
	),

	TP_fast_assign(
		__entry->pe = pe;
		__entry->addr = addr;
		__entry->count = count;
		__entry->incr = incr;
		__entry->flags = flags;
		__entry->immediate = immediate;
	),
	TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
		  "immediate=%d", __entry->pe, __entry->addr, __entry->incr,
		  __entry->flags, __entry->count, __entry->immediate)
);

TRACE_EVENT(amdgpu_vm_copy_ptes,
	TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool immediate),
	TP_ARGS(pe, src, count, immediate),
	TP_STRUCT__entry(
		__field(u64, pe)
		__field(u64, src)
		__field(u32, count)
		__field(bool, immediate)
	),

	TP_fast_assign(
		__entry->pe = pe;
		__entry->src = src;
		__entry->count = count;
		__entry->immediate = immediate;
	),
	TP_printk("pe=%010Lx, src=%010Lx, count=%u, immediate=%d",
		  __entry->pe, __entry->src, __entry->count,
		  __entry->immediate)
);

TRACE_EVENT(amdgpu_vm_flush,
	TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
		 uint64_t pd_addr),
	TP_ARGS(ring, vmid, pd_addr),
	TP_STRUCT__entry(
		__string(ring, ring->name)
		__field(u32, vmid)
		__field(u32, vm_hub)
		__field(u64, pd_addr)
	),

	TP_fast_assign(
		__assign_str(ring, ring->name);
		__entry->vmid = vmid;
		__entry->vm_hub = ring->vm_hub;
		__entry->pd_addr = pd_addr;
	),
	TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
		  __get_str(ring), __entry->vmid,
		  __entry->vm_hub, __entry->pd_addr)
);

DECLARE_EVENT_CLASS(amdgpu_pasid,
	TP_PROTO(unsigned pasid),
	TP_ARGS(pasid),
	TP_STRUCT__entry(
		__field(unsigned, pasid)
	),
	TP_fast_assign(
		__entry->pasid = pasid;
	),
	TP_printk("pasid=%u", __entry->pasid)
);

DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
	TP_PROTO(unsigned pasid),
	TP_ARGS(pasid)
);

DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
	TP_PROTO(unsigned pasid),
	TP_ARGS(pasid)
);

TRACE_EVENT(amdgpu_bo_list_set,
	TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
	TP_ARGS(list, bo),
	TP_STRUCT__entry(
		__field(struct amdgpu_bo_list *, list)
		__field(struct amdgpu_bo *, bo)
		__field(u64, bo_size)
	),

	TP_fast_assign(
		__entry->list = list;
		__entry->bo = bo;
		__entry->bo_size = amdgpu_bo_size(bo);
	),
	TP_printk("list=%p, bo=%p, bo_size=%Ld",
		  __entry->list,
		  __entry->bo,
		  __entry->bo_size)
);

TRACE_EVENT(amdgpu_cs_bo_status,
	TP_PROTO(uint64_t total_bo, uint64_t total_size),
	TP_ARGS(total_bo, total_size),
	TP_STRUCT__entry(
		__field(u64, total_bo)
		__field(u64, total_size)
	),

	TP_fast_assign(
		__entry->total_bo = total_bo;
		__entry->total_size = total_size;
	),
	TP_printk("total_bo_size=%Ld, total_bo_count=%Ld",
		  __entry->total_bo, __entry->total_size)
);

TRACE_EVENT(amdgpu_bo_move,
	TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement),
	TP_ARGS(bo, new_placement, old_placement),
	TP_STRUCT__entry(
		__field(struct amdgpu_bo *, bo)
		__field(u64, bo_size)
		__field(u32, new_placement)
		__field(u32, old_placement)
	),

	TP_fast_assign(
		__entry->bo = bo;
		__entry->bo_size = amdgpu_bo_size(bo);
		__entry->new_placement = new_placement;
		__entry->old_placement = old_placement;
	),
	TP_printk("bo=%p, from=%d, to=%d, size=%Ld",
		  __entry->bo, __entry->old_placement,
		  __entry->new_placement, __entry->bo_size)
);

TRACE_EVENT(amdgpu_ib_pipe_sync,
	TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
	TP_ARGS(sched_job, fence),
	TP_STRUCT__entry(
		__string(ring, sched_job->base.sched->name)
		__field(uint64_t, id)
		__field(struct dma_fence *, fence)
		__field(uint64_t, ctx)
		__field(unsigned, seqno)
	),

	TP_fast_assign(
		__assign_str(ring, sched_job->base.sched->name);
		__entry->id = sched_job->base.id;
		__entry->fence = fence;
		__entry->ctx = fence->context;
		__entry->seqno = fence->seqno;
	),
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", 536c349dbc7Sjsg __get_str(ring), __entry->id, 537c349dbc7Sjsg __entry->fence, __entry->ctx, 538c349dbc7Sjsg __entry->seqno) 539c349dbc7Sjsg ); 540c349dbc7Sjsg 5411bb76ff1Sjsg TRACE_EVENT(amdgpu_reset_reg_dumps, 5421bb76ff1Sjsg TP_PROTO(uint32_t address, uint32_t value), 5431bb76ff1Sjsg TP_ARGS(address, value), 5441bb76ff1Sjsg TP_STRUCT__entry( 5451bb76ff1Sjsg __field(uint32_t, address) 5461bb76ff1Sjsg __field(uint32_t, value) 5471bb76ff1Sjsg ), 5481bb76ff1Sjsg TP_fast_assign( 5491bb76ff1Sjsg __entry->address = address; 5501bb76ff1Sjsg __entry->value = value; 5511bb76ff1Sjsg ), 5521bb76ff1Sjsg TP_printk("amdgpu register dump 0x%x: 0x%x", 5531bb76ff1Sjsg __entry->address, 5541bb76ff1Sjsg __entry->value) 5551bb76ff1Sjsg ); 5561bb76ff1Sjsg 557fb4d8502Sjsg #undef AMDGPU_JOB_GET_TIMELINE_NAME 558fb4d8502Sjsg #endif 559fb4d8502Sjsg 560fb4d8502Sjsg /* This part must be outside protection */ 561fb4d8502Sjsg #undef TRACE_INCLUDE_PATH 562fb4d8502Sjsg #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu 563fb4d8502Sjsg #include <trace/define_trace.h> 564