/*	$NetBSD: i915_request.c,v 1.17 2022/07/11 18:56:00 riastradh Exp $	*/

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_request.c,v 1.17 2022/07/11 18:56:00 riastradh Exp $");

#include <linux/dma-fence-array.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_pm.h"

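/*
 * An execute_cb is a callback hooked onto a request's execute_cb list;
 * its irq_work is queued once that request begins executing on the GPU,
 * either to complete an i915_sw_fence that was awaiting execution or to
 * run an explicit hook with the signaling request.
 */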
struct execute_cb {
	struct list_head link;
	struct irq_work work;
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
	struct i915_request *signal;
};

static struct i915_global_request {
	struct i915_global base;
	struct kmem_cache *slab_requests;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_execute_cbs;
} global;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return dev_name(to_request(fence)->i915->drm.dev);
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	const struct i915_gem_context *ctx;

	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	ctx = i915_request_gem_context(to_request(fence));
	if (!ctx)
		return "[" DRIVER_NAME "]";

	return ctx->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence),
				 interruptible | I915_WAIT_PRIORITY,
				 timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto an RCU freelist (i.e. the address
	 * is immediately reused), so mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
#ifndef __NetBSD__
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);
#endif

	kmem_cache_free(global.slab_requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
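
/*
 * These ops back every i915_request's embedded struct dma_fence, so a
 * request can be waited upon through the generic dma_fence interfaces
 * as well as through i915_request_wait().  For example (illustrative
 * only, assuming the caller holds a reference on rq):
 *
 *	struct dma_fence *f = &rq->fence;
 *
 *	if (dma_fence_wait_timeout(f, true, HZ) <= 0)
 *		; // timed out or was interrupted
 */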

static void irq_execute_cb(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	i915_sw_fence_complete(cb->fence);
	kmem_cache_free(global.slab_execute_cbs, cb);
}

static void irq_execute_cb_hook(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	cb->hook(container_of(cb->fence, struct i915_request, submit),
		 &cb->signal->fence);
	i915_request_put(cb->signal);

	irq_execute_cb(wrk);
}

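/*
 * Run (via irq_work) every execute callback attached to @rq, then reset
 * the list.  Called under rq->lock once the request has been marked as
 * active on the hardware.
 */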
static void __notify_execute_cb(struct i915_request *rq)
{
	struct execute_cb *cb;

	lockdep_assert_held(&rq->lock);

	if (list_empty(&rq->execute_cb))
		return;

	list_for_each_entry(cb, &rq->execute_cb, link)
		irq_work_queue(&cb->work);

	/*
	 * XXX Rollback on __i915_request_unsubmit()
	 *
	 * In the future, perhaps when we have an active time-slicing scheduler,
	 * it will be interesting to unsubmit parallel execution and remove
	 * busywaits from the GPU until their master is restarted. This is
	 * quite hairy, we have to carefully rollback the fence and do a
	 * preempt-to-idle cycle on the target engine, all the while the
	 * master execute_cb may refire.
	 */
	INIT_LIST_HEAD(&rq->execute_cb);
}

static inline void
remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	if (!READ_ONCE(request->file_priv))
		return;

	rcu_read_lock();
	file_priv = xchg(&request->file_priv, NULL);
	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		list_del(&request->client_link);
		spin_unlock(&file_priv->mm.lock);
	}
	rcu_read_unlock();
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = fetch_and_zero(&request->capture_list);
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);
	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
	spin_unlock_irq(&locked->active.lock);
}

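/*
 * Retire a completed request: reclaim its ring space, drop it from the
 * engine, timeline and client lists, signal its fence if necessary, and
 * release the reference held by the timeline.  Returns false if the
 * request has not yet completed and so cannot be retired.
 */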
bool i915_request_retire(struct i915_request *rq)
{
	if (!i915_request_completed(rq))
		return false;

	RQ_TRACE(rq, "\n");

	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
	trace_i915_request_retire(rq);

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&rq->link,
				  &i915_request_timeline(rq)->requests));
	rq->ring->head = rq->postfix;

	/*
	 * We only loosely track inflight requests across preemption,
	 * and so we may find ourselves attempting to retire a _completed_
	 * request that we have removed from the HW and put back on a run
	 * queue.
	 */
	remove_from_engine(rq);

	spin_lock_irq(&rq->lock);
	i915_request_mark_complete(rq);
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_cancel_breadcrumb(rq);
	if (i915_request_has_waitboost(rq)) {
		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
		atomic_dec(&rq->engine->gt->rps.num_waiters);
	}
	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
		__notify_execute_cb(rq);
	}
	GEM_BUG_ON(!list_empty(&rq->execute_cb));
	spin_unlock_irq(&rq->lock);

	remove_from_client(rq);
	list_del(&rq->link);

	intel_context_exit(rq->context);
	intel_context_unpin(rq->context);

	free_capture_list(rq);
	i915_sched_node_fini(&rq->sched);
	i915_request_put(rq);

	return true;
}

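/*
 * Retire every request on @rq's timeline up to and including @rq
 * itself.  Requests must be retired in completion order, so we walk
 * forward from the oldest.
 */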
void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *tmp;

	RQ_TRACE(rq, "\n");

	GEM_BUG_ON(!i915_request_completed(rq));

	do {
		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
	} while (i915_request_retire(tmp) && tmp != rq);
}

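/*
 * Arrange for @rq's submit fence to be held back until @signal starts
 * executing on the GPU (or, if @hook is given, for the hook to run at
 * that point).  If @signal is already active, complete immediately;
 * otherwise queue an execute_cb on @signal, rechecking under its lock
 * to close the race with concurrent submission.
 */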
static int
__await_execution(struct i915_request *rq,
		  struct i915_request *signal,
		  void (*hook)(struct i915_request *rq,
			       struct dma_fence *signal),
		  gfp_t gfp)
{
	struct execute_cb *cb;

	if (i915_request_is_active(signal)) {
		if (hook)
			hook(rq, &signal->fence);
		return 0;
	}

	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
	if (!cb)
		return -ENOMEM;

	cb->fence = &rq->submit;
	i915_sw_fence_await(cb->fence);
	init_irq_work(&cb->work, irq_execute_cb);

	if (hook) {
		cb->hook = hook;
		cb->signal = i915_request_get(signal);
		cb->work.func = irq_execute_cb_hook;
	}

	spin_lock_irq(&signal->lock);
	if (i915_request_is_active(signal)) {
		if (hook) {
			hook(rq, &signal->fence);
			i915_request_put(signal);
		}
		i915_sw_fence_complete(cb->fence);
		kmem_cache_free(global.slab_execute_cbs, cb);
	} else {
		list_add_tail(&cb->link, &signal->execute_cb);
	}
	spin_unlock_irq(&signal->lock);

	/* Copy across semaphore status as we need the same behaviour */
	rq->sched.flags |= signal->sched.flags;
	return 0;
}

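/*
 * Move @request onto the hardware: mark it active, transfer it to the
 * engine's active list and enable its breadcrumb.  Returns true if the
 * payload still needs to be submitted to HW, false if the request had
 * already completed (e.g. after preempt-to-busy) and only the
 * bookkeeping transfer is required.  Caller holds engine->active.lock.
 */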
bool __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	bool result = false;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * With the advent of preempt-to-busy, we frequently encounter
	 * requests that we have unsubmitted from HW, but left running
	 * until the next ack and so have completed in the meantime. On
	 * resubmission of that completed request, we can skip
	 * updating the payload, and execlists can even skip submitting
	 * the request.
	 *
	 * We must remove the request from the caller's priority queue,
	 * and the caller must only call us when the request is in their
	 * priority queue, under the active.lock. This ensures that the
	 * request has *not* yet been retired and we can safely move
	 * the request into the engine->active.list where it will be
	 * dropped upon retiring. (Otherwise, if we resubmit a *retired*
	 * request, this would be a horrible use-after-free.)
	 */
	if (i915_request_completed(request))
		goto xfer;

	if (intel_context_is_banned(request->context))
		i915_request_skip(request, -EIO);

	/*
	 * Are we using semaphores when the gpu is already saturated?
	 *
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * If we installed a semaphore on this request and we only submit
	 * the request after the signaler completed, that indicates the
	 * system is overloaded and using semaphores at this time only
	 * increases the amount of work we are doing. If so, we disable
	 * further use of semaphores until we are idle again, whence we
	 * optimistically try again.
	 */
	if (request->sched.semaphores &&
	    i915_sw_fence_signaled(&request->semaphore))
		engine->saturated |= request->sched.semaphores;

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	trace_i915_request_execute(request);
	engine->serial++;
	result = true;

xfer:	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
		list_move_tail(&request->sched.link, &engine->active.requests);
		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
	}

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_signal_breadcrumbs(engine);

	__notify_execute_cb(request);

	spin_unlock(&request->lock);

	return result;
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine __lockdep_used = request->engine;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);

	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

	spin_unlock(&request->lock);

	/* We've already spun, don't charge on resubmitting. */
	if (request->sched.semaphores && i915_request_started(request)) {
		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
		request->sched.semaphores = 0;
	}

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);

		if (unlikely(fence->error))
			i915_request_skip(request, fence->error);

		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

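/*
 * Slow path for request allocation: try to retire the oldest request on
 * the timeline to return one to the slab, retry the allocation, and if
 * that still fails, rate-limit ourselves by waiting out the RCU grace
 * period of the newest request before retiring everything we can and
 * making one last, possibly blocking, attempt.
 */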
static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
	struct i915_request *rq;

	if (list_empty(&tl->requests))
		goto out;

	if (!gfpflags_allow_blocking(gfp))
		goto out;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	i915_request_retire(rq);

	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (rq)
		return rq;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&tl->requests, typeof(*rq), link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	retire_requests(tl);

out:
	return kmem_cache_alloc(global.slab_requests, gfp);
}

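/*
 * Slab constructor/destructor: these run once per slab object, not per
 * request, so they only set up state that must survive allocate/free
 * cycles (the lock, the sw fences, the embedded dma_fence).  All
 * per-allocation state is reset in __i915_request_create() instead.
 */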
static void __i915_request_ctor(void *arg)
{
	struct i915_request *rq = arg;

	spin_lock_init(&rq->lock);
	i915_sched_node_init(&rq->sched);
	i915_sw_fence_init(&rq->submit, submit_notify);
	i915_sw_fence_init(&rq->semaphore, semaphore_notify);

	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);

	rq->file_priv = NULL;
	rq->capture_list = NULL;

	INIT_LIST_HEAD(&rq->execute_cb);
}

static void __i915_request_dtor(void *arg)
{
	struct i915_request *rq = arg;

	dma_fence_destroy(&rq->fence);
#ifdef __NetBSD__
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);
#endif
	spin_lock_destroy(&rq->lock);
}

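/*
 * A sketch of the typical request lifecycle, assuming a pinned context
 * and the usual GFP_KERNEL path (illustrative only; error handling and
 * emission of the actual payload are elided):
 *
 *	rq = i915_request_create(ce);		// allocate and lock timeline
 *	i915_request_await_dma_fence(rq, in);	// order after earlier work
 *	// ... emit commands via intel_ring_begin()/intel_ring_advance() ...
 *	i915_request_add(rq);			// commit and unlock
 *
 * i915_request_add() is defined later in this file.
 */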
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;
	u32 seqno;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Check that the caller provided an already pinned context */
	__intel_context_pin(ce);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_active_request_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and we restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = request_alloc_slow(tl, gfp);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	rq->i915 = ce->engine->i915;
	rq->context = ce;
	rq->engine = ce->engine;
	rq->ring = ce->ring;
	rq->execution_mask = ce->engine->mask;

#ifdef __NetBSD__
	dma_fence_reset(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
#else
	kref_init(&rq->fence.refcount);
	rq->fence.flags = 0;
	rq->fence.error = 0;
	INIT_LIST_HEAD(&rq->fence.cb_list);
#endif

	ret = intel_timeline_get_seqno(tl, rq, &seqno);
	if (ret)
		goto err_free;

	rq->fence.context = tl->fence_context;
	rq->fence.seqno = seqno;

	RCU_INIT_POINTER(rq->timeline, tl);
	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
	rq->hwsp_seqno = tl->hwsp_seqno;

	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

	/* We bump the ref for the fence chain */
	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

	i915_sched_node_reinit(&rq->sched);

	/* No zalloc, everything must be cleared after use */
	rq->batch = NULL;
	GEM_BUG_ON(rq->file_priv);
	GEM_BUG_ON(rq->capture_list);
	GEM_BUG_ON(!list_empty(&rq->execute_cb));

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space =
		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = rq->engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	intel_context_mark_active(ce);
	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
	kmem_cache_free(global.slab_requests, rq);
err_unreserve:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

struct i915_request *
i915_request_create(struct intel_context *ce)
{
	struct i915_request *rq;
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return ERR_CAST(tl);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	if (!list_is_last(&rq->link, &tl->requests))
		i915_request_retire(rq);

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_KERNEL);
	intel_context_exit(ce); /* active reference transferred to request */
	if (IS_ERR(rq))
		goto err_unlock;

	/* Check that we do not interrupt ourselves with a new request */
	rq->cookie = lockdep_pin_lock(&tl->mutex);

	return rq;

err_unlock:
	intel_context_timeline_unlock(tl);
	return rq;
}

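/*
 * Make @rq wait until @signal has *started* executing, by awaiting the
 * completion of the request immediately preceding @signal on its
 * timeline: requests on a timeline execute in order, so the
 * predecessor's completion marks the earliest point at which @signal
 * can begin.
 */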
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
	struct dma_fence *fence;
	int err;

	GEM_BUG_ON(i915_request_timeline(rq) ==
		   rcu_access_pointer(signal->timeline));

	fence = NULL;
	rcu_read_lock();
	spin_lock_irq(&signal->lock);
	if (!i915_request_started(signal) &&
	    !list_is_first(&signal->link,
			   &rcu_dereference(signal->timeline)->requests)) {
		struct i915_request *prev = list_prev_entry(signal, link);

		/*
		 * Peek at the request before us in the timeline. That
		 * request will only be valid before it is retired, so
		 * after acquiring a reference to it, confirm that it is
		 * still part of the signaler's timeline.
		 */
		if (i915_request_get_rcu(prev)) {
			if (list_next_entry(prev, link) == signal)
				fence = &prev->fence;
			else
				i915_request_put(prev);
		}
	}
	spin_unlock_irq(&signal->lock);
	rcu_read_unlock();
	if (!fence)
		return 0;

	err = 0;
	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
		err = i915_sw_fence_await_dma_fence(&rq->submit,
						    fence, 0,
						    I915_FENCE_GFP);
	dma_fence_put(fence);

	return err;
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
	/*
	 * Polling a semaphore causes bus traffic, delaying other users of
	 * both the GPU and CPU. We want to limit the impact on others,
	 * while taking advantage of early submission to reduce GPU
	 * latency. Therefore we restrict ourselves to not using more
	 * than one semaphore from each source, and not using a semaphore
	 * if we have detected the engine is saturated (i.e. would not be
	 * submitted early and cause bus traffic reading an already passed
	 * semaphore).
	 *
	 * See the are-we-too-late? check in __i915_request_submit().
	 */
	return rq->sched.semaphores | rq->engine->saturated;
}

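/*
 * Emit an MI_SEMAPHORE_WAIT into @to's ring that busywaits (polls) on
 * the signaler's HWSP until the seqno written there is >= @seqno.  The
 * command stream emitted below is, roughly (gen12+ pads with an extra
 * dword pair for the token):
 *
 *	MI_SEMAPHORE_WAIT | GGTT | POLL | SAD_GTE_SDD
 *	<seqno to compare against>
 *	<HWSP address, low 32 bits>
 *	<HWSP address, high 32 bits (0)>
 */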
static int
__emit_semaphore_wait(struct i915_request *to,
		      struct i915_request *from,
		      u32 seqno)
{
	const int has_token = INTEL_GEN(to->i915) >= 12;
	u32 hwsp_offset;
	int len, err;
	u32 *cs;

	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);

	/* We need to pin the signaler's HWSP until we are finished reading. */
	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
	if (err)
		return err;

	len = 4;
	if (has_token)
		len += 2;

	cs = intel_ring_begin(to, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Using greater-than-or-equal here means we have to worry
	 * about seqno wraparound. To side step that issue, we swap
	 * the timeline HWSP upon wrapping, so that anyone listening
	 * for the old (pre-wrap) values does not see much smaller
	 * (post-wrap) values than they were expecting (and so wait
	 * forever).
	 */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_GTE_SDD) +
		has_token;
	*cs++ = seqno;
	*cs++ = hwsp_offset;
	*cs++ = 0;
	if (has_token) {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(to, cs);
	return 0;
}

static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{
	/* Just emit the first semaphore we see as request space is limited. */
	if (already_busywaiting(to) & from->engine->mask)
		goto await_fence;

	if (i915_request_await_start(to, from) < 0)
		goto await_fence;

	/* Only submit our spinner after the signaler is running! */
	if (__await_execution(to, from, NULL, gfp))
		goto await_fence;

	if (__emit_semaphore_wait(to, from, from->fence.seqno))
		goto await_fence;

	to->sched.semaphores |= from->engine->mask;
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
	return 0;

await_fence:
	return i915_sw_fence_await_dma_fence(&to->submit,
					     &from->fence, 0,
					     I915_FENCE_GFP);
}

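/*
 * Order @to after @from, choosing the cheapest mechanism available: a
 * software-fence coupling when both run on the same engine (ordered by
 * submission anyway), a GPU semaphore busywait when the context allows
 * it, or a plain dma-fence wait on @from's completion otherwise.
 */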
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine)
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
	else if (intel_context_use_semaphores(to->context))
		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
	else
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    I915_FENCE_GFP);
	if (ret < 0)
		return ret;

	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
						    &from->fence, 0,
						    I915_FENCE_GFP);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context &&
		    intel_timeline_sync_is_later(i915_request_timeline(rq),
						 fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    fence->context ? I915_FENCE_TIMEOUT : 0,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context)
			intel_timeline_sync_set(i915_request_timeline(rq),
						fence);
	} while (--nchild);

	return 0;
}

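/*
 * A request's start is approximated by its seqno minus one: once
 * seqno - 1 has been signaled on the timeline, the request carrying
 * @fence is known to have begun.  These helpers record and query that
 * "has started" point in the timeline's sync map, so repeated
 * execution-awaits on the same timeline can be squashed.
 */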
intel_timeline_sync_has_start(struct intel_timeline * tl,struct dma_fence * fence)10394e390cabSriastradh static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
10404e390cabSriastradh struct dma_fence *fence)
10414e390cabSriastradh {
10424e390cabSriastradh return __intel_timeline_sync_is_later(tl,
10434e390cabSriastradh fence->context,
10444e390cabSriastradh fence->seqno - 1);
10454e390cabSriastradh }
10464e390cabSriastradh
intel_timeline_sync_set_start(struct intel_timeline * tl,const struct dma_fence * fence)10474e390cabSriastradh static int intel_timeline_sync_set_start(struct intel_timeline *tl,
10484e390cabSriastradh const struct dma_fence *fence)
10494e390cabSriastradh {
10504e390cabSriastradh return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
10514e390cabSriastradh }
10524e390cabSriastradh
10534e390cabSriastradh static int
__i915_request_await_execution(struct i915_request * to,struct i915_request * from,void (* hook)(struct i915_request * rq,struct dma_fence * signal))10544e390cabSriastradh __i915_request_await_execution(struct i915_request *to,
10554e390cabSriastradh struct i915_request *from,
10564e390cabSriastradh void (*hook)(struct i915_request *rq,
10574e390cabSriastradh struct dma_fence *signal))
10584e390cabSriastradh {
10594e390cabSriastradh int err;
10604e390cabSriastradh
10614e390cabSriastradh /* Submit both requests at the same time */
10624e390cabSriastradh err = __await_execution(to, from, hook, I915_FENCE_GFP);
10634e390cabSriastradh if (err)
10644e390cabSriastradh return err;
10654e390cabSriastradh
10664e390cabSriastradh /* Squash repeated depenendices to the same timelines */
10674e390cabSriastradh if (intel_timeline_sync_has_start(i915_request_timeline(to),
10684e390cabSriastradh &from->fence))
10694e390cabSriastradh return 0;
10704e390cabSriastradh
10714e390cabSriastradh /* Ensure both start together [after all semaphores in signal] */
10724e390cabSriastradh if (intel_engine_has_semaphores(to->engine))
10734e390cabSriastradh err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
10744e390cabSriastradh else
10754e390cabSriastradh err = i915_request_await_start(to, from);
10764e390cabSriastradh if (err < 0)
10774e390cabSriastradh return err;
10784e390cabSriastradh
10794e390cabSriastradh /* Couple the dependency tree for PI on this exposed to->fence */
10804e390cabSriastradh if (to->engine->schedule) {
10814e390cabSriastradh err = i915_sched_node_add_dependency(&to->sched, &from->sched);
10824e390cabSriastradh if (err < 0)
10834e390cabSriastradh return err;
10844e390cabSriastradh }
10854e390cabSriastradh
10864e390cabSriastradh return intel_timeline_sync_set_start(i915_request_timeline(to),
10874e390cabSriastradh &from->fence);
10884e390cabSriastradh }
10894e390cabSriastradh
10904e390cabSriastradh int
i915_request_await_execution(struct i915_request * rq,struct dma_fence * fence,void (* hook)(struct i915_request * rq,struct dma_fence * signal))10914e390cabSriastradh i915_request_await_execution(struct i915_request *rq,
10924e390cabSriastradh struct dma_fence *fence,
10934e390cabSriastradh void (*hook)(struct i915_request *rq,
10944e390cabSriastradh struct dma_fence *signal))
10954e390cabSriastradh {
10964e390cabSriastradh struct dma_fence **child = &fence;
10974e390cabSriastradh unsigned int nchild = 1;
10984e390cabSriastradh int ret;
10994e390cabSriastradh
11004e390cabSriastradh if (dma_fence_is_array(fence)) {
11014e390cabSriastradh struct dma_fence_array *array = to_dma_fence_array(fence);
11024e390cabSriastradh
11034e390cabSriastradh /* XXX Error for signal-on-any fence arrays */
11044e390cabSriastradh
11054e390cabSriastradh child = array->fences;
11064e390cabSriastradh nchild = array->num_fences;
11074e390cabSriastradh GEM_BUG_ON(!nchild);
11084e390cabSriastradh }
11094e390cabSriastradh
11104e390cabSriastradh do {
11114e390cabSriastradh fence = *child++;
11124e390cabSriastradh if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
11134e390cabSriastradh i915_sw_fence_set_error_once(&rq->submit, fence->error);
11144e390cabSriastradh continue;
11154e390cabSriastradh }
11164e390cabSriastradh
11174e390cabSriastradh /*
11184e390cabSriastradh * We don't squash repeated fence dependencies here as we
11194e390cabSriastradh * want to run our callback in all cases.
11204e390cabSriastradh */
11214e390cabSriastradh
11224e390cabSriastradh if (dma_fence_is_i915(fence))
11234e390cabSriastradh ret = __i915_request_await_execution(rq,
11244e390cabSriastradh to_request(fence),
11254e390cabSriastradh hook);
11264e390cabSriastradh else
11274e390cabSriastradh ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
11284e390cabSriastradh I915_FENCE_TIMEOUT,
11294e390cabSriastradh GFP_KERNEL);
11304e390cabSriastradh if (ret < 0)
11314e390cabSriastradh return ret;
11324e390cabSriastradh } while (--nchild);
11334e390cabSriastradh
11344e390cabSriastradh return 0;
11354e390cabSriastradh }
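
/*
 * Illustrative sketch (not part of the driver): a caller wanting rq
 * submitted alongside another request, rather than after its
 * completion, might use this interface roughly as below; the hook and
 * the "master" request are hypothetical names.
 *
 *	static void hypothetical_hook(struct i915_request *rq,
 *	    struct dma_fence *signal)
 *	{
 *		... e.g. couple rq to the engine chosen for signal ...
 *	}
 *
 *	err = i915_request_await_execution(rq, &master->fence,
 *	    hypothetical_hook);
 *	if (err)
 *		return err;
 */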
11364e390cabSriastradh
11374e390cabSriastradh /**
11384e390cabSriastradh * i915_request_await_object - set this request to (async) wait upon a bo
11394e390cabSriastradh * @to: request we are wishing to use
11404e390cabSriastradh * @obj: object which may be in use on another ring.
11414e390cabSriastradh * @write: whether the wait is on behalf of a writer
11424e390cabSriastradh *
11434e390cabSriastradh * This code is meant to abstract object synchronization with the GPU.
11444e390cabSriastradh * Conceptually we serialise writes between engines inside the GPU.
11454e390cabSriastradh * We only allow one engine to write into a buffer at any time, but
11464e390cabSriastradh * multiple readers. To ensure each has a coherent view of memory, we must:
11474e390cabSriastradh *
11484e390cabSriastradh * - If there is an outstanding write request to the object, the new
11494e390cabSriastradh * request must wait for it to complete (either CPU or in hw, requests
11504e390cabSriastradh * on the same ring will be naturally ordered).
11514e390cabSriastradh *
11524e390cabSriastradh * - If we are a write request (pending_write_domain is set), the new
11534e390cabSriastradh * request must wait for outstanding read requests to complete.
11544e390cabSriastradh *
11554e390cabSriastradh  * Returns 0 if successful, else propagates the error from the lower layer.
11564e390cabSriastradh */
11574e390cabSriastradh int
11584e390cabSriastradh i915_request_await_object(struct i915_request *to,
11594e390cabSriastradh struct drm_i915_gem_object *obj,
11604e390cabSriastradh bool write)
11614e390cabSriastradh {
11624e390cabSriastradh struct dma_fence *excl;
11634e390cabSriastradh int ret = 0;
11644e390cabSriastradh
11654e390cabSriastradh if (write) {
11664e390cabSriastradh struct dma_fence **shared;
11674e390cabSriastradh unsigned int count, i;
11684e390cabSriastradh
11694e390cabSriastradh ret = dma_resv_get_fences_rcu(obj->base.resv,
11704e390cabSriastradh &excl, &count, &shared);
11714e390cabSriastradh if (ret)
11724e390cabSriastradh return ret;
11734e390cabSriastradh
11744e390cabSriastradh for (i = 0; i < count; i++) {
11754e390cabSriastradh ret = i915_request_await_dma_fence(to, shared[i]);
11764e390cabSriastradh if (ret)
11774e390cabSriastradh break;
11784e390cabSriastradh
11794e390cabSriastradh dma_fence_put(shared[i]);
11804e390cabSriastradh }
11814e390cabSriastradh
11824e390cabSriastradh for (; i < count; i++)
11834e390cabSriastradh dma_fence_put(shared[i]);
11844e390cabSriastradh kfree(shared);
11854e390cabSriastradh } else {
11864e390cabSriastradh excl = dma_resv_get_excl_rcu(obj->base.resv);
11874e390cabSriastradh }
11884e390cabSriastradh
11894e390cabSriastradh if (excl) {
11904e390cabSriastradh if (ret == 0)
11914e390cabSriastradh ret = i915_request_await_dma_fence(to, excl);
11924e390cabSriastradh
11934e390cabSriastradh dma_fence_put(excl);
11944e390cabSriastradh }
11954e390cabSriastradh
11964e390cabSriastradh return ret;
11974e390cabSriastradh }
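
/*
 * Illustrative sketch of the rules above (names hypothetical, error
 * handling abbreviated): a request that reads from src need only await
 * the exclusive (writer) fence, while one that writes to dst must also
 * await all shared (reader) fences.
 *
 *	err = i915_request_await_object(rq, src, false);
 *	if (err == 0)
 *		err = i915_request_await_object(rq, dst, true);
 */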
11984e390cabSriastradh
11994e390cabSriastradh void i915_request_skip(struct i915_request *rq, int error)
12004e390cabSriastradh {
12014e390cabSriastradh void *vaddr = rq->ring->vaddr;
12024e390cabSriastradh u32 head;
12034e390cabSriastradh
12044e390cabSriastradh GEM_BUG_ON(!IS_ERR_VALUE((long)error));
12054e390cabSriastradh dma_fence_set_error(&rq->fence, error);
12064e390cabSriastradh
12074e390cabSriastradh if (rq->infix == rq->postfix)
12084e390cabSriastradh return;
12094e390cabSriastradh
12104e390cabSriastradh /*
12114e390cabSriastradh * As this request likely depends on state from the lost
12124e390cabSriastradh * context, clear out all the user operations leaving the
12134e390cabSriastradh * breadcrumb at the end (so we get the fence notifications).
12144e390cabSriastradh */
12154e390cabSriastradh head = rq->infix;
12164e390cabSriastradh if (rq->postfix < head) {
1217cfce158cSriastradh memset(vaddr + head, 0, rq->ring->size - head);
12184e390cabSriastradh head = 0;
12194e390cabSriastradh }
1220cfce158cSriastradh memset(vaddr + head, 0, rq->postfix - head);
12214e390cabSriastradh rq->infix = rq->postfix;
12224e390cabSriastradh }
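
/*
 * Note on the wraparound above (values illustrative): with
 * ring->size == 4096, infix == 4032 and postfix == 64, the payload is
 * cleared in two spans, [4032, 4096) and then [0, 64), leaving just
 * the breadcrumb to execute.
 */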
12234e390cabSriastradh
12244e390cabSriastradh static struct i915_request *
12254e390cabSriastradh __i915_request_add_to_timeline(struct i915_request *rq)
12264e390cabSriastradh {
12274e390cabSriastradh struct intel_timeline *timeline = i915_request_timeline(rq);
12284e390cabSriastradh struct i915_request *prev;
12294e390cabSriastradh
12304e390cabSriastradh /*
12314e390cabSriastradh * Dependency tracking and request ordering along the timeline
12324e390cabSriastradh * is special cased so that we can eliminate redundant ordering
12334e390cabSriastradh * operations while building the request (we know that the timeline
12344e390cabSriastradh * itself is ordered, and here we guarantee it).
12354e390cabSriastradh *
12364e390cabSriastradh * As we know we will need to emit tracking along the timeline,
12374e390cabSriastradh * we embed the hooks into our request struct -- at the cost of
12384e390cabSriastradh * having to have specialised no-allocation interfaces (which will
12394e390cabSriastradh * be beneficial elsewhere).
12404e390cabSriastradh *
12414e390cabSriastradh * A second benefit to open-coding i915_request_await_request is
12424e390cabSriastradh * that we can apply a slight variant of the rules specialised
12434e390cabSriastradh * for timelines that jump between engines (such as virtual engines).
12444e390cabSriastradh  * If we consider the case of a virtual engine, we must emit a dma-fence
12454e390cabSriastradh  * to prevent scheduling of the second request until the first is
12464e390cabSriastradh  * complete (to maximise our greedy late load balancing) and this
12474e390cabSriastradh  * precludes optimising to use semaphore serialisation of a single
12484e390cabSriastradh * timeline across engines.
12494e390cabSriastradh */
12504e390cabSriastradh prev = to_request(__i915_active_fence_set(&timeline->last_request,
12514e390cabSriastradh &rq->fence));
12524e390cabSriastradh if (prev && !i915_request_completed(prev)) {
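		/*
		 * Note (added for clarity): if the union of the engine
		 * masks is a power of two, both requests can only run on
		 * one and the same engine, so ordering their submit
		 * fences suffices; otherwise (e.g. masks 0x2 | 0x4 == 0x6)
		 * we must await prev's hardware completion via its
		 * dma-fence.
		 */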
12534e390cabSriastradh if (is_power_of_2(prev->engine->mask | rq->engine->mask))
12544e390cabSriastradh i915_sw_fence_await_sw_fence(&rq->submit,
12554e390cabSriastradh &prev->submit,
12564e390cabSriastradh &rq->submitq);
12574e390cabSriastradh else
12584e390cabSriastradh __i915_sw_fence_await_dma_fence(&rq->submit,
12594e390cabSriastradh &prev->fence,
12604e390cabSriastradh &rq->dmaq);
12614e390cabSriastradh if (rq->engine->schedule)
12624e390cabSriastradh __i915_sched_node_add_dependency(&rq->sched,
12634e390cabSriastradh &prev->sched,
12644e390cabSriastradh &rq->dep,
12654e390cabSriastradh 0);
12664e390cabSriastradh }
12674e390cabSriastradh
12684e390cabSriastradh list_add_tail(&rq->link, &timeline->requests);
12694e390cabSriastradh
12704e390cabSriastradh /*
12714e390cabSriastradh * Make sure that no request gazumped us - if it was allocated after
12724e390cabSriastradh * our i915_request_alloc() and called __i915_request_add() before
12734e390cabSriastradh * us, the timeline will hold its seqno which is later than ours.
12744e390cabSriastradh */
12754e390cabSriastradh GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
12764e390cabSriastradh
12774e390cabSriastradh return prev;
12784e390cabSriastradh }
12794e390cabSriastradh
12804e390cabSriastradh /*
12814e390cabSriastradh  * NB: This function is not allowed to fail. Doing so would mean the
12824e390cabSriastradh * request is not being tracked for completion but the work itself is
12834e390cabSriastradh * going to happen on the hardware. This would be a Bad Thing(tm).
12844e390cabSriastradh */
12854e390cabSriastradh struct i915_request *__i915_request_commit(struct i915_request *rq)
12864e390cabSriastradh {
12874e390cabSriastradh struct intel_engine_cs *engine = rq->engine;
12884e390cabSriastradh struct intel_ring *ring = rq->ring;
12894e390cabSriastradh u32 *cs;
12904e390cabSriastradh
12914e390cabSriastradh RQ_TRACE(rq, "\n");
12924e390cabSriastradh
12934e390cabSriastradh /*
12944e390cabSriastradh * To ensure that this call will not fail, space for its emissions
12954e390cabSriastradh * should already have been reserved in the ring buffer. Let the ring
12964e390cabSriastradh * know that it is time to use that space up.
12974e390cabSriastradh */
12984e390cabSriastradh GEM_BUG_ON(rq->reserved_space > ring->space);
12994e390cabSriastradh rq->reserved_space = 0;
13004e390cabSriastradh rq->emitted_jiffies = jiffies;
13014e390cabSriastradh
13024e390cabSriastradh /*
13034e390cabSriastradh * Record the position of the start of the breadcrumb so that
13044e390cabSriastradh * should we detect the updated seqno part-way through the
13054e390cabSriastradh * GPU processing the request, we never over-estimate the
13064e390cabSriastradh * position of the ring's HEAD.
13074e390cabSriastradh */
13084e390cabSriastradh cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
13094e390cabSriastradh GEM_BUG_ON(IS_ERR(cs));
13104e390cabSriastradh rq->postfix = intel_ring_offset(rq, cs);
13114e390cabSriastradh
13124e390cabSriastradh return __i915_request_add_to_timeline(rq);
13134e390cabSriastradh }
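
/*
 * Note (added for clarity): callers pair __i915_request_commit() with
 * __i915_request_queue() under the timeline mutex, as i915_request_add()
 * below does; commit closes ring emission, queue releases the submit
 * fence to the backend.
 */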
13144e390cabSriastradh
13154e390cabSriastradh void __i915_request_queue(struct i915_request *rq,
13164e390cabSriastradh const struct i915_sched_attr *attr)
13174e390cabSriastradh {
13184e390cabSriastradh /*
13194e390cabSriastradh * Let the backend know a new request has arrived that may need
13204e390cabSriastradh * to adjust the existing execution schedule due to a high priority
13214e390cabSriastradh * request - i.e. we may want to preempt the current request in order
13224e390cabSriastradh * to run a high priority dependency chain *before* we can execute this
13234e390cabSriastradh * request.
13244e390cabSriastradh *
13254e390cabSriastradh * This is called before the request is ready to run so that we can
13264e390cabSriastradh * decide whether to preempt the entire chain so that it is ready to
13274e390cabSriastradh * run at the earliest possible convenience.
13284e390cabSriastradh */
13294e390cabSriastradh i915_sw_fence_commit(&rq->semaphore);
13304e390cabSriastradh if (attr && rq->engine->schedule)
13314e390cabSriastradh rq->engine->schedule(rq, attr);
13324e390cabSriastradh i915_sw_fence_commit(&rq->submit);
13334e390cabSriastradh }
13344e390cabSriastradh
13354e390cabSriastradh void i915_request_add(struct i915_request *rq)
13364e390cabSriastradh {
13374e390cabSriastradh struct intel_timeline * const tl = i915_request_timeline(rq);
13384e390cabSriastradh struct i915_sched_attr attr = {};
13394e390cabSriastradh struct i915_request *prev;
13404e390cabSriastradh
13414e390cabSriastradh lockdep_assert_held(&tl->mutex);
13424e390cabSriastradh lockdep_unpin_lock(&tl->mutex, rq->cookie);
13434e390cabSriastradh
13444e390cabSriastradh trace_i915_request_add(rq);
13454e390cabSriastradh
13464e390cabSriastradh prev = __i915_request_commit(rq);
13474e390cabSriastradh
13484e390cabSriastradh if (rcu_access_pointer(rq->context->gem_context))
13494e390cabSriastradh attr = i915_request_gem_context(rq)->sched;
13504e390cabSriastradh
13514e390cabSriastradh /*
13524e390cabSriastradh * Boost actual workloads past semaphores!
13534e390cabSriastradh *
13544e390cabSriastradh * With semaphores we spin on one engine waiting for another,
13554e390cabSriastradh * simply to reduce the latency of starting our work when
13564e390cabSriastradh * the signaler completes. However, if there is any other
13574e390cabSriastradh * work that we could be doing on this engine instead, that
13584e390cabSriastradh * is better utilisation and will reduce the overall duration
13594e390cabSriastradh * of the current work. To avoid PI boosting a semaphore
13604e390cabSriastradh  * far in the distant past over useful work, we keep a history
13614e390cabSriastradh * of any semaphore use along our dependency chain.
13624e390cabSriastradh */
13634e390cabSriastradh if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
13644e390cabSriastradh attr.priority |= I915_PRIORITY_NOSEMAPHORE;
13654e390cabSriastradh
13664e390cabSriastradh /*
13674e390cabSriastradh * Boost priorities to new clients (new request flows).
13684e390cabSriastradh *
13694e390cabSriastradh * Allow interactive/synchronous clients to jump ahead of
13704e390cabSriastradh * the bulk clients. (FQ_CODEL)
13714e390cabSriastradh */
13724e390cabSriastradh if (list_empty(&rq->sched.signalers_list))
13734e390cabSriastradh attr.priority |= I915_PRIORITY_WAIT;
13744e390cabSriastradh
137592f64546Sriastradh #ifdef __NetBSD__
137692f64546Sriastradh int s = splsoftserial();
137792f64546Sriastradh #else
13784e390cabSriastradh local_bh_disable();
137992f64546Sriastradh #endif
13804e390cabSriastradh __i915_request_queue(rq, &attr);
138192f64546Sriastradh #ifdef __NetBSD__
138292f64546Sriastradh splx(s);
138392f64546Sriastradh #else
13844e390cabSriastradh local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
138592f64546Sriastradh #endif
13864e390cabSriastradh
13874e390cabSriastradh /*
13884e390cabSriastradh * In typical scenarios, we do not expect the previous request on
13894e390cabSriastradh * the timeline to be still tracked by timeline->last_request if it
13904e390cabSriastradh * has been completed. If the completed request is still here, that
13914e390cabSriastradh * implies that request retirement is a long way behind submission,
13924e390cabSriastradh * suggesting that we haven't been retiring frequently enough from
13934e390cabSriastradh * the combination of retire-before-alloc, waiters and the background
13944e390cabSriastradh * retirement worker. So if the last request on this timeline was
13954e390cabSriastradh * already completed, do a catch up pass, flushing the retirement queue
13964e390cabSriastradh * up to this client. Since we have now moved the heaviest operations
13974e390cabSriastradh * during retirement onto secondary workers, such as freeing objects
13984e390cabSriastradh * or contexts, retiring a bunch of requests is mostly list management
13994e390cabSriastradh * (and cache misses), and so we should not be overly penalizing this
14004e390cabSriastradh  * client by performing excess work, though we may still be performing
14014e390cabSriastradh * work on behalf of others -- but instead we should benefit from
14024e390cabSriastradh * improved resource management. (Well, that's the theory at least.)
14034e390cabSriastradh */
14044e390cabSriastradh if (prev &&
14054e390cabSriastradh i915_request_completed(prev) &&
14064e390cabSriastradh rcu_access_pointer(prev->timeline) == tl)
14074e390cabSriastradh i915_request_retire_upto(prev);
14084e390cabSriastradh
14094e390cabSriastradh mutex_unlock(&tl->mutex);
14104e390cabSriastradh }
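
/*
 * Illustrative sketch of the overall request lifecycle (error handling
 * elided; intel_context_create_request() is assumed to be the usual
 * constructor):
 *
 *	rq = intel_context_create_request(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... emit commands, i915_request_await_*() on prerequisites ...
 *	i915_request_add(rq);
 */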
14114e390cabSriastradh
14124e390cabSriastradh static unsigned long local_clock_us(unsigned int *cpu)
14134e390cabSriastradh {
14144e390cabSriastradh unsigned long t;
14154e390cabSriastradh
14164e390cabSriastradh /*
14174e390cabSriastradh * Cheaply and approximately convert from nanoseconds to microseconds.
14184e390cabSriastradh * The result and subsequent calculations are also defined in the same
14194e390cabSriastradh * approximate microseconds units. The principal source of timing
14204e390cabSriastradh * error here is from the simple truncation.
14214e390cabSriastradh *
14224e390cabSriastradh 	 * Note that local_clock() is only defined with respect to the current CPU;
14234e390cabSriastradh * the comparisons are no longer valid if we switch CPUs. Instead of
14244e390cabSriastradh * blocking preemption for the entire busywait, we can detect the CPU
14254e390cabSriastradh * switch and use that as indicator of system load and a reason to
14264e390cabSriastradh * stop busywaiting, see busywait_stop().
14274e390cabSriastradh */
14284e390cabSriastradh *cpu = get_cpu();
14294e390cabSriastradh t = local_clock() >> 10;
14304e390cabSriastradh put_cpu();
14314e390cabSriastradh
14324e390cabSriastradh return t;
14334e390cabSriastradh }
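
/*
 * Note on the arithmetic above: the >> 10 divides by 1024 rather than
 * 1000, so these "microseconds" run ~2.4% short (1,000,000ns reads as
 * ~976us); that error is negligible next to the truncation already
 * accepted.
 */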
14344e390cabSriastradh
14354e390cabSriastradh static bool busywait_stop(unsigned long timeout, unsigned int cpu)
14364e390cabSriastradh {
14374e390cabSriastradh unsigned int this_cpu;
14384e390cabSriastradh
14394e390cabSriastradh if (time_after(local_clock_us(&this_cpu), timeout))
14404e390cabSriastradh return true;
14414e390cabSriastradh
14424e390cabSriastradh return this_cpu != cpu;
14434e390cabSriastradh }
14444e390cabSriastradh
14454e390cabSriastradh static bool __i915_spin_request(const struct i915_request * const rq,
14464e390cabSriastradh int state, unsigned long timeout_us)
14474e390cabSriastradh {
14484e390cabSriastradh unsigned int cpu;
14494e390cabSriastradh
14504e390cabSriastradh /*
14514e390cabSriastradh * Only wait for the request if we know it is likely to complete.
14524e390cabSriastradh *
14534e390cabSriastradh * We don't track the timestamps around requests, nor the average
14544e390cabSriastradh * request length, so we do not have a good indicator that this
14554e390cabSriastradh * request will complete within the timeout. What we do know is the
14564e390cabSriastradh * order in which requests are executed by the context and so we can
14574e390cabSriastradh * tell if the request has been started. If the request is not even
14584e390cabSriastradh * running yet, it is a fair assumption that it will not complete
14594e390cabSriastradh * within our relatively short timeout.
14604e390cabSriastradh */
14614e390cabSriastradh if (!i915_request_is_running(rq))
14624e390cabSriastradh return false;
14634e390cabSriastradh
14644e390cabSriastradh /*
14654e390cabSriastradh * When waiting for high frequency requests, e.g. during synchronous
14664e390cabSriastradh * rendering split between the CPU and GPU, the finite amount of time
14674e390cabSriastradh * required to set up the irq and wait upon it limits the response
14684e390cabSriastradh * rate. By busywaiting on the request completion for a short while we
14694e390cabSriastradh 	 * can service the high frequency waits as quickly as possible. However,
14704e390cabSriastradh * if it is a slow request, we want to sleep as quickly as possible.
14714e390cabSriastradh * The tradeoff between waiting and sleeping is roughly the time it
14724e390cabSriastradh * takes to sleep on a request, on the order of a microsecond.
14734e390cabSriastradh */
14744e390cabSriastradh
14754e390cabSriastradh timeout_us += local_clock_us(&cpu);
14764e390cabSriastradh do {
14774e390cabSriastradh if (i915_request_completed(rq))
14784e390cabSriastradh return true;
14794e390cabSriastradh
14804e390cabSriastradh if (signal_pending_state(state, current))
14814e390cabSriastradh break;
14824e390cabSriastradh
14834e390cabSriastradh if (busywait_stop(timeout_us, cpu))
14844e390cabSriastradh break;
14854e390cabSriastradh
14864e390cabSriastradh cpu_relax();
14874e390cabSriastradh } while (!need_resched());
14884e390cabSriastradh
14894e390cabSriastradh return false;
14904e390cabSriastradh }
14914e390cabSriastradh
14924e390cabSriastradh struct request_wait {
14934e390cabSriastradh struct dma_fence_cb cb;
1494873b6c05Sriastradh #ifdef __NetBSD__
14953ce207bbSriastradh drm_waitqueue_t wq;
1496873b6c05Sriastradh #else
14974e390cabSriastradh struct task_struct *tsk;
1498873b6c05Sriastradh #endif
14994e390cabSriastradh };
15004e390cabSriastradh
15014e390cabSriastradh static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
15024e390cabSriastradh {
15034e390cabSriastradh struct request_wait *wait = container_of(cb, typeof(*wait), cb);
15044e390cabSriastradh
15053ce207bbSriastradh #ifdef __NetBSD__
15063ce207bbSriastradh DRM_SPIN_WAKEUP_ALL(&wait->wq, fence->lock);
15073ce207bbSriastradh #else
15084e390cabSriastradh wake_up_process(wait->tsk);
15093ce207bbSriastradh #endif
15104e390cabSriastradh }
15114e390cabSriastradh
15124e390cabSriastradh /**
15134e390cabSriastradh * i915_request_wait - wait until execution of request has finished
15144e390cabSriastradh * @rq: the request to wait upon
15154e390cabSriastradh * @flags: how to wait
15164e390cabSriastradh * @timeout: how long to wait in jiffies
15174e390cabSriastradh *
15184e390cabSriastradh * i915_request_wait() waits for the request to be completed, for a
15194e390cabSriastradh * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
15204e390cabSriastradh * unbounded wait).
15214e390cabSriastradh *
15224e390cabSriastradh * Returns the remaining time (in jiffies) if the request completed, which may
15234e390cabSriastradh * be zero or -ETIME if the request is unfinished after the timeout expires.
15244e390cabSriastradh  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
15254e390cabSriastradh * pending before the request completes.
15264e390cabSriastradh */
15274e390cabSriastradh long i915_request_wait(struct i915_request *rq,
15284e390cabSriastradh unsigned int flags,
15294e390cabSriastradh long timeout)
15304e390cabSriastradh {
15314e390cabSriastradh const int state = flags & I915_WAIT_INTERRUPTIBLE ?
15324e390cabSriastradh TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
15334e390cabSriastradh struct request_wait wait;
15344e390cabSriastradh
15354e390cabSriastradh might_sleep();
15364e390cabSriastradh GEM_BUG_ON(timeout < 0);
15374e390cabSriastradh
15384e390cabSriastradh if (dma_fence_is_signaled(&rq->fence))
15394e390cabSriastradh return timeout;
15404e390cabSriastradh
15414e390cabSriastradh if (!timeout)
15424e390cabSriastradh return -ETIME;
15434e390cabSriastradh
15444e390cabSriastradh trace_i915_request_wait_begin(rq, flags);
15454e390cabSriastradh
15464e390cabSriastradh /*
15474e390cabSriastradh * We must never wait on the GPU while holding a lock as we
15484e390cabSriastradh * may need to perform a GPU reset. So while we don't need to
15494e390cabSriastradh * serialise wait/reset with an explicit lock, we do want
15504e390cabSriastradh * lockdep to detect potential dependency cycles.
15514e390cabSriastradh */
15524e390cabSriastradh mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
15534e390cabSriastradh
15544e390cabSriastradh /*
15554e390cabSriastradh * Optimistic spin before touching IRQs.
15564e390cabSriastradh *
15574e390cabSriastradh * We may use a rather large value here to offset the penalty of
15584e390cabSriastradh * switching away from the active task. Frequently, the client will
15594e390cabSriastradh * wait upon an old swapbuffer to throttle itself to remain within a
15604e390cabSriastradh * frame of the gpu. If the client is running in lockstep with the gpu,
15614e390cabSriastradh * then it should not be waiting long at all, and a sleep now will incur
15624e390cabSriastradh * extra scheduler latency in producing the next frame. To try to
15634e390cabSriastradh * avoid adding the cost of enabling/disabling the interrupt to the
15644e390cabSriastradh * short wait, we first spin to see if the request would have completed
15654e390cabSriastradh * in the time taken to setup the interrupt.
15664e390cabSriastradh *
15674e390cabSriastradh 	 * We need up to 5us to enable the irq, and up to 20us to hide the
15684e390cabSriastradh * scheduler latency of a context switch, ignoring the secondary
15694e390cabSriastradh * impacts from a context switch such as cache eviction.
15704e390cabSriastradh *
15714e390cabSriastradh * The scheme used for low-latency IO is called "hybrid interrupt
15724e390cabSriastradh * polling". The suggestion there is to sleep until just before you
15734e390cabSriastradh * expect to be woken by the device interrupt and then poll for its
15744e390cabSriastradh * completion. That requires having a good predictor for the request
15754e390cabSriastradh * duration, which we currently lack.
15764e390cabSriastradh */
15774e390cabSriastradh if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
15784e390cabSriastradh __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
15794e390cabSriastradh dma_fence_signal(&rq->fence);
15804e390cabSriastradh goto out;
15814e390cabSriastradh }
15824e390cabSriastradh
15834e390cabSriastradh /*
15844e390cabSriastradh * This client is about to stall waiting for the GPU. In many cases
15854e390cabSriastradh * this is undesirable and limits the throughput of the system, as
15864e390cabSriastradh * many clients cannot continue processing user input/output whilst
15874e390cabSriastradh * blocked. RPS autotuning may take tens of milliseconds to respond
15884e390cabSriastradh * to the GPU load and thus incurs additional latency for the client.
15894e390cabSriastradh * We can circumvent that by promoting the GPU frequency to maximum
15904e390cabSriastradh * before we sleep. This makes the GPU throttle up much more quickly
15914e390cabSriastradh * (good for benchmarks and user experience, e.g. window animations),
15924e390cabSriastradh * but at a cost of spending more power processing the workload
15934e390cabSriastradh * (bad for battery).
15944e390cabSriastradh */
15954e390cabSriastradh if (flags & I915_WAIT_PRIORITY) {
15964e390cabSriastradh if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
15974e390cabSriastradh intel_rps_boost(rq);
15984e390cabSriastradh i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
15994e390cabSriastradh }
16004e390cabSriastradh
16013ce207bbSriastradh #ifdef __NetBSD__
16023ce207bbSriastradh DRM_INIT_WAITQUEUE(&wait.wq, "i915req");
1603879b0e17Sriastradh #else
1604879b0e17Sriastradh wait.tsk = current;
1605879b0e17Sriastradh #endif
16063ce207bbSriastradh if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
16073ce207bbSriastradh goto out;
1608879b0e17Sriastradh
1609879b0e17Sriastradh #ifdef __NetBSD__
16103ce207bbSriastradh spin_lock(rq->fence.lock);
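	/*
	 * Note (added for clarity): the condition macro below rechecks
	 * completion after dropping the fence lock to flush the
	 * submission tasklet, mirroring the
	 * intel_engine_flush_submission() call in the Linux wait loop
	 * further down.
	 */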
1611920cbad7Sriastradh #define C (i915_request_completed(rq) ? 1 : \
16122d599fa8Sriastradh (spin_unlock(rq->fence.lock), \
16132d599fa8Sriastradh intel_engine_flush_submission(rq->engine), \
16142d599fa8Sriastradh spin_lock(rq->fence.lock), \
16152d599fa8Sriastradh i915_request_completed(rq)))
16163ce207bbSriastradh if (flags & I915_WAIT_INTERRUPTIBLE) {
16173ce207bbSriastradh DRM_SPIN_TIMED_WAIT_UNTIL(timeout, &wait.wq,
16183ce207bbSriastradh rq->fence.lock, timeout,
1619920cbad7Sriastradh C);
16203ce207bbSriastradh } else {
16213ce207bbSriastradh DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(timeout, &wait.wq,
16223ce207bbSriastradh rq->fence.lock, timeout,
1623920cbad7Sriastradh C);
16243ce207bbSriastradh }
1625920cbad7Sriastradh #undef C
1626b47ee0a4Sriastradh if (timeout > 0) { /* succeeded before timeout */
1627b47ee0a4Sriastradh KASSERT(i915_request_completed(rq));
16289d172eb6Sriastradh dma_fence_signal_locked(&rq->fence);
1629b47ee0a4Sriastradh } else if (timeout == 0) { /* timed out */
1630b47ee0a4Sriastradh timeout = -ETIME;
1631b47ee0a4Sriastradh }
16323ce207bbSriastradh spin_unlock(rq->fence.lock);
16333ce207bbSriastradh #else
16344e390cabSriastradh for (;;) {
16354e390cabSriastradh set_current_state(state);
16364e390cabSriastradh
16374e390cabSriastradh if (i915_request_completed(rq)) {
16384e390cabSriastradh dma_fence_signal(&rq->fence);
16394e390cabSriastradh break;
16404e390cabSriastradh }
16414e390cabSriastradh
16424e390cabSriastradh if (signal_pending_state(state, current)) {
16434e390cabSriastradh timeout = -ERESTARTSYS;
16444e390cabSriastradh break;
16454e390cabSriastradh }
16464e390cabSriastradh
16474e390cabSriastradh if (!timeout) {
16484e390cabSriastradh timeout = -ETIME;
16494e390cabSriastradh break;
16504e390cabSriastradh }
16514e390cabSriastradh
16524e390cabSriastradh intel_engine_flush_submission(rq->engine);
16534e390cabSriastradh timeout = io_schedule_timeout(timeout);
16544e390cabSriastradh }
16554e390cabSriastradh __set_current_state(TASK_RUNNING);
16563ce207bbSriastradh #endif
16574e390cabSriastradh
16584e390cabSriastradh dma_fence_remove_callback(&rq->fence, &wait.cb);
1659*5f6c4afeSriastradh #ifdef __NetBSD__
1660*5f6c4afeSriastradh DRM_DESTROY_WAITQUEUE(&wait.wq);
1661*5f6c4afeSriastradh #endif
16624e390cabSriastradh
16634e390cabSriastradh out:
16644e390cabSriastradh mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
16654e390cabSriastradh trace_i915_request_wait_end(rq);
16664e390cabSriastradh return timeout;
16674e390cabSriastradh }
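
/*
 * Illustrative sketch (values hypothetical): a bounded, interruptible
 * wait for a request.
 *
 *	long t = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *	    msecs_to_jiffies(100));
 *	if (t < 0)
 *		return t;	(-ETIME on timeout, or
 *				 -ERESTARTSYS/-EINTR on a pending signal)
 */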
16684e390cabSriastradh
16694e390cabSriastradh #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
16704e390cabSriastradh #include "selftests/mock_request.c"
16714e390cabSriastradh #include "selftests/i915_request.c"
16724e390cabSriastradh #endif
16734e390cabSriastradh
16744e390cabSriastradh static void i915_global_request_shrink(void)
16754e390cabSriastradh {
16764e390cabSriastradh kmem_cache_shrink(global.slab_dependencies);
16774e390cabSriastradh kmem_cache_shrink(global.slab_execute_cbs);
16784e390cabSriastradh kmem_cache_shrink(global.slab_requests);
16794e390cabSriastradh }
16804e390cabSriastradh
16814e390cabSriastradh static void i915_global_request_exit(void)
16824e390cabSriastradh {
16834e390cabSriastradh kmem_cache_destroy(global.slab_dependencies);
16844e390cabSriastradh kmem_cache_destroy(global.slab_execute_cbs);
16854e390cabSriastradh kmem_cache_destroy(global.slab_requests);
16864e390cabSriastradh }
16874e390cabSriastradh
16884e390cabSriastradh static struct i915_global_request global = { {
16894e390cabSriastradh .shrink = i915_global_request_shrink,
16904e390cabSriastradh .exit = i915_global_request_exit,
16914e390cabSriastradh } };
16924e390cabSriastradh
16934e390cabSriastradh int __init i915_global_request_init(void)
16944e390cabSriastradh {
16954e390cabSriastradh global.slab_requests =
169616f1ba6cSriastradh kmem_cache_create_dtor("i915_request",
16974e390cabSriastradh sizeof(struct i915_request),
16984e390cabSriastradh __alignof__(struct i915_request),
16994e390cabSriastradh SLAB_HWCACHE_ALIGN |
17004e390cabSriastradh SLAB_RECLAIM_ACCOUNT |
17014e390cabSriastradh SLAB_TYPESAFE_BY_RCU,
170216f1ba6cSriastradh __i915_request_ctor,
170316f1ba6cSriastradh __i915_request_dtor);
17044e390cabSriastradh if (!global.slab_requests)
17054e390cabSriastradh return -ENOMEM;
17064e390cabSriastradh
17074e390cabSriastradh global.slab_execute_cbs = KMEM_CACHE(execute_cb,
17084e390cabSriastradh SLAB_HWCACHE_ALIGN |
17094e390cabSriastradh SLAB_RECLAIM_ACCOUNT |
17104e390cabSriastradh SLAB_TYPESAFE_BY_RCU);
17114e390cabSriastradh if (!global.slab_execute_cbs)
17124e390cabSriastradh goto err_requests;
17134e390cabSriastradh
17144e390cabSriastradh global.slab_dependencies = KMEM_CACHE(i915_dependency,
17154e390cabSriastradh SLAB_HWCACHE_ALIGN |
17164e390cabSriastradh SLAB_RECLAIM_ACCOUNT);
17174e390cabSriastradh if (!global.slab_dependencies)
17184e390cabSriastradh goto err_execute_cbs;
17194e390cabSriastradh
17204e390cabSriastradh i915_global_register(&global.base);
17214e390cabSriastradh return 0;
17224e390cabSriastradh
17234e390cabSriastradh err_execute_cbs:
17244e390cabSriastradh kmem_cache_destroy(global.slab_execute_cbs);
17254e390cabSriastradh err_requests:
17264e390cabSriastradh kmem_cache_destroy(global.slab_requests);
17274e390cabSriastradh return -ENOMEM;
17284e390cabSriastradh }
1729