// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"

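/*
 * Refresh the cached free space between HEAD and the software emit offset.
 * See __intel_ring_space() in intel_ring.h: a cacheline of slack is always
 * reserved so that HEAD and TAIL never sit on the same cacheline.
 */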
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

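/*
 * Take an extra pin reference on a ring that is already pinned; the
 * GEM_BUG_ON documents that this must never be the first pin.
 */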
void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}

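/*
 * The first pin maps the ring for the CPU: through the mappable GGTT
 * aperture (a WC iomap) when the vma is map-and-fenceable on a non-LLC
 * platform, otherwise through a CPU mapping of the object with a coherency
 * type chosen by intel_gt_coherent_map_type().
 */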
int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (i915_gem_object_is_stolen(vma->obj))
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
		addr = (void __force *)i915_vma_pin_iomap(vma);
	} else {
		int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);

		addr = i915_gem_object_pin_map(vma->obj, type);
	}

	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

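/*
 * Collapse HEAD, TAIL and the software emit offset onto @tail (wrapped to
 * the ring size) and recompute the free space.
 */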
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

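/*
 * Drop a pin reference; the final unpin tears down the CPU mapping and
 * marks the backing store purgeable so it may be discarded under memory
 * pressure.
 */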
void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

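/*
 * Allocate the backing store for a ring, preferring local memory, then
 * falling back to stolen memory (only when it is reachable through the
 * mappable aperture on non-LLC platforms) and finally to internal pages.
 */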
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

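/*
 * Create a ring of the requested power-of-two size. ring->wrap caches the
 * left-shift that intel_ring_direction() (see intel_ring.h) applies so the
 * difference between two ring offsets can be compared as a signed distance
 * modulo the ring size.
 */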
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

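/*
 * Find the oldest request on this timeline whose completion would free
 * enough space in the ring, wait for it, and then retire everything up to
 * and including it so that the space is actually reclaimed.
 */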
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

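/*
 * Reserve @num_dwords of ring space (the count must be even to keep
 * packets qword aligned) and return a pointer at which to write the
 * commands. A minimal usage sketch:
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */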
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Pad out to the cacheline with MI_NOOP, two dwords per qword. */
	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif