/*	$NetBSD: i915_perf.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_perf.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
22 static struct i915_perf_stream *
test_stream(struct i915_perf * perf)23 test_stream(struct i915_perf *perf)
24 {
25 struct drm_i915_perf_open_param param = {};
26 struct perf_open_properties props = {
27 .engine = intel_engine_lookup_user(perf->i915,
28 I915_ENGINE_CLASS_RENDER,
29 0),
30 .sample_flags = SAMPLE_OA_REPORT,
31 .oa_format = IS_GEN(perf->i915, 12) ?
32 I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
33 .metrics_set = 1,
34 };
35 struct i915_perf_stream *stream;
36
37 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
38 if (!stream)
39 return NULL;
40
41 stream->perf = perf;
42
43 mutex_lock(&perf->lock);
44 if (i915_oa_stream_init(stream, ¶m, &props)) {
45 kfree(stream);
46 stream = NULL;
47 }
48 mutex_unlock(&perf->lock);
49
50 return stream;
51 }
52
stream_destroy(struct i915_perf_stream * stream)53 static void stream_destroy(struct i915_perf_stream *stream)
54 {
55 struct i915_perf *perf = stream->perf;
56
57 mutex_lock(&perf->lock);
58 i915_perf_destroy_locked(stream);
59 mutex_unlock(&perf->lock);
60 }
61
live_sanitycheck(void * arg)62 static int live_sanitycheck(void *arg)
63 {
64 struct drm_i915_private *i915 = arg;
65 struct i915_perf_stream *stream;
66
67 /* Quick check we can create a perf stream */
68
69 stream = test_stream(&i915->perf);
70 if (!stream)
71 return -EINVAL;
72
73 stream_destroy(stream);
74 return 0;
75 }
76
write_timestamp(struct i915_request * rq,int slot)77 static int write_timestamp(struct i915_request *rq, int slot)
78 {
79 u32 *cs;
80 int len;
81
82 cs = intel_ring_begin(rq, 6);
83 if (IS_ERR(cs))
84 return PTR_ERR(cs);
85
86 len = 5;
87 if (INTEL_GEN(rq->i915) >= 8)
88 len++;
89
90 *cs++ = GFX_OP_PIPE_CONTROL(len);
91 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
92 PIPE_CONTROL_STORE_DATA_INDEX |
93 PIPE_CONTROL_WRITE_TIMESTAMP;
94 *cs++ = slot * sizeof(u32);
95 *cs++ = 0;
96 *cs++ = 0;
97 *cs++ = 0;
98
99 intel_ring_advance(rq, cs);
100
101 return 0;
102 }
103
poll_status(struct i915_request * rq,int slot)104 static ktime_t poll_status(struct i915_request *rq, int slot)
105 {
106 while (!intel_read_status_page(rq->engine, slot) &&
107 !i915_request_completed(rq))
108 cpu_relax();
109
110 return ktime_get();
111 }
112
live_noa_delay(void * arg)113 static int live_noa_delay(void *arg)
114 {
115 struct drm_i915_private *i915 = arg;
116 struct i915_perf_stream *stream;
117 struct i915_request *rq;
118 ktime_t t0, t1;
119 u64 expected;
120 u32 delay;
121 int err;
122 int i;
123
124 /* Check that the GPU delays matches expectations */
125
126 stream = test_stream(&i915->perf);
127 if (!stream)
128 return -ENOMEM;
129
130 expected = atomic64_read(&stream->perf->noa_programming_delay);
131
132 if (stream->engine->class != RENDER_CLASS) {
133 err = -ENODEV;
134 goto out;
135 }
136
137 for (i = 0; i < 4; i++)
138 intel_write_status_page(stream->engine, 0x100 + i, 0);
139
140 rq = intel_engine_create_kernel_request(stream->engine);
141 if (IS_ERR(rq)) {
142 err = PTR_ERR(rq);
143 goto out;
144 }
145
146 if (rq->engine->emit_init_breadcrumb &&
147 i915_request_timeline(rq)->has_initial_breadcrumb) {
148 err = rq->engine->emit_init_breadcrumb(rq);
149 if (err) {
150 i915_request_add(rq);
151 goto out;
152 }
153 }
154
155 err = write_timestamp(rq, 0x100);
156 if (err) {
157 i915_request_add(rq);
158 goto out;
159 }
160
161 err = rq->engine->emit_bb_start(rq,
162 i915_ggtt_offset(stream->noa_wait), 0,
163 I915_DISPATCH_SECURE);
164 if (err) {
165 i915_request_add(rq);
166 goto out;
167 }
168
169 err = write_timestamp(rq, 0x102);
170 if (err) {
171 i915_request_add(rq);
172 goto out;
173 }
174
175 i915_request_get(rq);
176 i915_request_add(rq);
177
178 preempt_disable();
179 t0 = poll_status(rq, 0x100);
180 t1 = poll_status(rq, 0x102);
181 preempt_enable();
182
183 pr_info("CPU delay: %lluns, expected %lluns\n",
184 ktime_sub(t1, t0), expected);
185
186 delay = intel_read_status_page(stream->engine, 0x102);
187 delay -= intel_read_status_page(stream->engine, 0x100);
188 delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
189 RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
190 pr_info("GPU delay: %uns, expected %lluns\n",
191 delay, expected);
192
193 if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
194 pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
195 delay / 1000,
196 div_u64(3 * expected, 4000),
197 div_u64(3 * expected, 2000));
198 err = -EINVAL;
199 }
200
201 i915_request_put(rq);
202 out:
203 stream_destroy(stream);
204 return err;
205 }
206
i915_perf_live_selftests(struct drm_i915_private * i915)207 int i915_perf_live_selftests(struct drm_i915_private *i915)
208 {
209 static const struct i915_subtest tests[] = {
210 SUBTEST(live_sanitycheck),
211 SUBTEST(live_noa_delay),
212 };
213 struct i915_perf *perf = &i915->perf;
214
215 if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
216 return 0;
217
218 if (intel_gt_is_wedged(&i915->gt))
219 return 0;
220
221 return i915_subtests(tests, i915);
222 }