/*	$NetBSD: igt_spinner.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: igt_spinner.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

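/*
 * igt_spinner_init - allocate the spinner's backing objects.
 *
 * Creates two internal GEM objects: a page used as a private hardware
 * status page (hws), into which the spinning batch writes its seqno,
 * and a page holding the batch itself.  Both are mapped for CPU access.
 */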
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
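/*
 * Each fence context gets its own u32 slot within the hws page, so
 * concurrent spinners on different timelines do not clobber each other.
 */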
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

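/* GPU address of this request's seqno slot within the hws page. */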
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

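/*
 * Serialise the request against any prior users of the vma and track
 * the vma as active on the request's timeline, so it is not unbound
 * while the GPU may still be using it.
 */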
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

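/*
 * igt_spinner_create_request - build a request that spins on the GPU.
 *
 * The emitted batch stores the request's seqno into the hws page and
 * then branches back to its own start, looping until igt_spinner_end()
 * rewrites the batch with MI_BATCH_BUFFER_END.  The arbitration_command
 * dword is chosen by the caller, typically MI_ARB_CHECK to leave the
 * loop preemptible or MI_NOOP to keep it non-preemptible.
 */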
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

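	/*
	 * Write the request's seqno into its hws slot; the encoding of
	 * the store-dword command (address width, GGTT vs per-process
	 * GTT) varies across generations.
	 */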
	if (INTEL_GEN(rq->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

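	/* Caller-chosen arbitration point (typically MI_ARB_CHECK or MI_NOOP). */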
	*batch++ = arbitration_command;

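	/*
	 * Branch back to the start of this batch, spinning until the
	 * first dword is overwritten with MI_BATCH_BUFFER_END.
	 */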
	if (INTEL_GEN(rq->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb &&
	    i915_request_timeline(rq)->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

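	/*
	 * On gen5 and earlier, the store-dword is a privileged command
	 * and requires a secure batch to be executed.
	 */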
	flags = 0;
	if (INTEL_GEN(rq->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

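/* Read back the seqno the spinner last wrote for this request's context. */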
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

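/*
 * igt_spinner_end - stop the spinner.
 *
 * Overwrites the first dword of the batch with MI_BATCH_BUFFER_END and
 * flushes the write, so the GPU exits the loop on its next pass.
 */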
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

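/* Stop the spinner and release its batch and hws objects. */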
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

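/*
 * igt_wait_for_spinner - wait until the spinner is running on the GPU.
 *
 * Polls for up to 10us, then for up to 1000ms, for the spinner to
 * report the request's seqno in the hws page.  Returns true if the
 * spinner was seen executing, false on timeout.
 */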
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}