/*	$NetBSD: intel_ring_types.h,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_RING_TYPES_H
#define INTEL_RING_TYPES_H

#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/types.h>

/*
 * Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

struct i915_vma;

struct intel_ring {
	struct kref ref;
	struct i915_vma *vma;
	void *vaddr;

	/*
	 * As we have two types of rings, one global to the engine used
	 * by ringbuffer submission and those that are exclusive to a
	 * context used by execlists, we have to play safe and allow
	 * atomic updates to the pin_count. However, the actual pinning
	 * of the context is either done during initialisation for
	 * ringbuffer submission or serialised as part of the context
	 * pinning for execlists, and so we do not need a mutex ourselves
	 * to serialise intel_ring_pin/intel_ring_unpin.
	 */
	atomic_t pin_count;

	u32 head;	/* updated during retire, loosely tracks RING_HEAD */
	u32 tail;	/* updated on submission, used for RING_TAIL */
	u32 emit;	/* updated during request construction */

	u32 space;		/* cached free space in the ring, in bytes */
	u32 size;		/* allocated size of the ring, in bytes */
	u32 wrap;		/* shift used when comparing wrapped offsets */
	u32 effective_size;	/* usable size; may be less than size */
};

#endif /* INTEL_RING_TYPES_H */
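
/*
 * Illustrative sketch, not part of the original header: one way the
 * bookkeeping fields above can be combined. The name example_ring_space()
 * is hypothetical (the driver keeps its real ring helpers elsewhere);
 * it assumes size is a power of two, so masking by (size - 1) wraps the
 * circular-buffer subtraction, and it reserves one cacheline of slack,
 * which is one plausible use of the CACHELINE_BYTES magic value noted
 * above: a full ring never brings head and tail onto the same cacheline.
 */
static inline unsigned int
example_ring_space(const struct intel_ring *ring)
{
	/*
	 * u32 subtraction wraps modulo 2^32; the mask then reduces the
	 * result to a byte count within the power-of-two ring.
	 */
	return (ring->head - ring->emit - CACHELINE_BYTES) &
	    (ring->size - 1);
}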