/* Public domain. */

#ifndef _LINUX_DMA_FENCE_H
#define _LINUX_DMA_FENCE_H

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

#define DMA_FENCE_TRACE(fence, fmt, args...) do {} while(0)

struct dma_fence {
	struct kref refcount;
	const struct dma_fence_ops *ops;
	unsigned long flags;
	unsigned int context;
	unsigned int seqno;
	struct mutex *lock;
	struct list_head cb_list;
	int error;
	struct rcu_head rcu;
};

enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS,
};

struct dma_fence_ops {
	const char * (*get_driver_name)(struct dma_fence *);
	const char * (*get_timeline_name)(struct dma_fence *);
	bool (*enable_signaling)(struct dma_fence *);
	bool (*signaled)(struct dma_fence *);
	long (*wait)(struct dma_fence *, bool, long);
	void (*release)(struct dma_fence *);
};

struct dma_fence_cb;
typedef void (*dma_fence_func_t)(struct dma_fence *fence, struct dma_fence_cb *cb);

struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};

unsigned int dma_fence_context_alloc(unsigned int);

static inline struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

static inline struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence **dfp)
{
	struct dma_fence *fence;
	if (dfp == NULL)
		return NULL;
	fence = *dfp;
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

static inline void
dma_fence_release(struct kref *ref)
{
	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
	if (fence->ops && fence->ops->release)
		fence->ops->release(fence);
	else
		free(fence, M_DRM, 0);
}

static inline void
dma_fence_free(struct dma_fence *fence)
{
	free(fence, M_DRM, 0);
}

static inline void
dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

static inline int
dma_fence_signal(struct dma_fence *fence)
{
	if (fence == NULL)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		mtx_enter(fence->lock);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		mtx_leave(fence->lock);
	}

	return 0;
}

static inline int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;

	if (fence == NULL)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}

static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}

long dma_fence_default_wait(struct dma_fence *, bool, long);

static inline long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{
	if (timeout < 0)
		return -EINVAL;

	if (fence->ops->wait)
		return fence->ops->wait(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

static inline long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	return dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
}

static inline void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		mtx_enter(fence->lock);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		mtx_leave(fence->lock);
	}
}

static inline void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    struct mutex *lock, unsigned context, unsigned seqno)
{
	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0;
	fence->error = 0;
	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);
}

static inline int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	mtx_leave(fence->lock);

	return ret;
}

static inline bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	bool ret;

	mtx_enter(fence->lock);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	mtx_leave(fence->lock);

	return ret;
}

static inline bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{
	return (a->seqno > b->seqno);
}

static inline void
dma_fence_set_error(struct dma_fence *fence, int error)
{
	fence->error = error;
}

long dma_fence_wait_any_timeout(struct dma_fence **, uint32_t, bool, long,
    uint32_t *);

#endif
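
/*
 * Minimal usage sketch (illustrative only, not part of the interface
 * above): one way a driver could create, publish and signal a fence with
 * this header.  The identifiers exfence_ops, exfence_driver_name,
 * exfence_timeline_name, exfence_mtx and f are hypothetical; only the
 * get_driver_name and get_timeline_name hooks are filled in, so waits
 * fall back to dma_fence_default_wait() and release falls back to
 * free(fence, M_DRM, 0).
 *
 *	static const char *
 *	exfence_driver_name(struct dma_fence *f)
 *	{
 *		return "exdrv";
 *	}
 *
 *	static const char *
 *	exfence_timeline_name(struct dma_fence *f)
 *	{
 *		return "exdrv-timeline";
 *	}
 *
 *	static const struct dma_fence_ops exfence_ops = {
 *		.get_driver_name = exfence_driver_name,
 *		.get_timeline_name = exfence_timeline_name,
 *	};
 *
 *	static struct mutex exfence_mtx;
 *	struct dma_fence *f;
 *
 *	mtx_init(&exfence_mtx, IPL_NONE);
 *	f = malloc(sizeof(*f), M_DRM, M_WAITOK | M_ZERO);
 *	dma_fence_init(f, &exfence_ops, &exfence_mtx,
 *	    dma_fence_context_alloc(1), 1);
 *
 *	... hand out references with dma_fence_get(), finish the work ...
 *
 *	dma_fence_signal(f);
 *	dma_fence_put(f);
 */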