/* $NetBSD: igt_gem_utils.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: igt_gem_utils.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

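/*
 * igt_request_alloc() - allocate a request on @engine for selftest use.
 *
 * Looks up the intel_context for @engine in @ctx, creates a request on it
 * and drops the context reference again, so the caller only has to manage
 * the returned request (or ERR_PTR on failure).
 */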
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}

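/*
 * igt_emit_store_dw() - build a batch buffer that stores @val into @vma.
 *
 * Allocates an internal object and fills it with @count MI_STORE_DWORD_IMM
 * commands, one per page, starting at @offset within @vma's GPU address
 * range, terminated by MI_BATCH_BUFFER_END.  Returns the pinned batch vma,
 * or an ERR_PTR on failure.
 */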
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
		  u64 offset,
		  unsigned long count,
		  u32 val)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

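	/*
	 * Worst case is 4 dwords per store (the gen8+ encoding), plus one
	 * dword for MI_BATCH_BUFFER_END, rounded up to a whole page.
	 */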
	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

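	/*
	 * MI_STORE_DWORD_IMM is encoded differently across generations:
	 * gen8+ takes a 64-bit address split over two dwords, gen4-7 a
	 * 32-bit address (through the GGTT on gen4/5), and gen2/3 use the
	 * legacy opcode with MI_MEM_VIRTUAL.
	 */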
	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

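	/* Make sure the WC writes above are visible to the GPU. */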
	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

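/*
 * igt_gpu_fill_dw() - fill @count pages of @vma with @val from the GPU.
 *
 * Emits a store-dword batch with igt_emit_store_dw(), submits it as a
 * request on @ce and marks both the batch and the target @vma as in use
 * by that request.  The target @vma must already be pinned by the caller.
 * Returns 0 on success or a negative error code.
 */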
int igt_gpu_fill_dw(struct intel_context *ce,
		    struct i915_vma *vma, u64 offset,
		    unsigned long count, u32 val)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	batch = igt_emit_store_dw(vma, offset, count, val);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

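	/*
	 * On gen5 and earlier the store commands address memory through the
	 * GGTT, which requires the batch to run as a privileged (secure)
	 * dispatch.
	 */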
	flags = 0;
	if (INTEL_GEN(ce->vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					flags);
	if (err)
		goto err_request;

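	/*
	 * Record both the batch and the target on the request so they are
	 * kept alive until the request completes; the target is tracked as
	 * a GPU write.
	 */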
	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_request_add(rq);

	i915_vma_unpin_and_release(&batch, 0);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}