/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: AMDGPU_FENCE_FLAG_* flags to emit the fence with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
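
/*
 * Illustrative sketch (assumption, not part of this file): a typical
 * internal caller, such as the IB submission path, is expected to emit
 * the fence between allocating and committing ring space, roughly like:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	... emit command packets ...
 *	r = amdgpu_fence_emit(ring, &fence, 0);
 *	amdgpu_ring_commit(ring);
 *
 * amdgpu_ring_alloc()/amdgpu_ring_commit() live in amdgpu_ring.c; the
 * exact call sequence and "ndw" are assumptions for illustration only.
 */
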
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fences that are polled for completion instead of being
 * signaled through an interrupt.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to look up the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the given sequence number has signaled on the
 * requested ring (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * or 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
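
/*
 * Illustrative sketch (assumption, not part of this file): callers that
 * cannot rely on fence interrupts, e.g. KIQ register access under SRIOV,
 * can pair the two polling helpers roughly like:
 *
 *	uint32_t seq;
 *	signed long left;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq);
 *	if (r)
 *		return r;
 *	amdgpu_ring_commit(ring);
 *	left = amdgpu_fence_wait_polling(ring, seq, timeout_us);
 *	if (left <= 0)
 *		return -ETIME;
 *
 * "timeout_us" and the exact error handling are assumptions for
 * illustration only.
 */
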
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
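
/*
 * Worked example for the arithmetic above: with last_seq == 0xfffffffe and
 * sync_seq == 0x00000001 (the 32-bit sequence counter has just wrapped),
 * emitted = 0x100000000 - 0xfffffffe + 0x00000001 = 3, so three fences are
 * correctly reported as outstanding despite the wrap.
 */
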
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	if (irq_src)
		amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		"cpu addr 0x%p\n", ring->idx,
		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for the KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* for the non-sriov case, no timeout is enforced on compute rings */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				&& !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
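
/*
 * Worked example for the sizing above: with num_hw_submission == 256 the
 * fences array holds 512 slots and num_fences_mask == 0x1ff, so the slot
 * for a new sequence number (seq & num_fences_mask) is only reused after
 * 512 further emissions.  amdgpu_fence_emit() additionally waits on any
 * fence still occupying the slot, which bounds the number of fences kept
 * alive per ring to twice the hardware queue depth.
 */
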
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be force-signaled
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and arms the
 * fallback timer so the fence is eventually processed even if the
 * interrupt is missed.
 * Returns true as amdgpu fences are always going to be signaled.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a GPU reset and recovery when this debugfs file is read.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL, true);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}