xref: /openbsd-src/sys/dev/pci/drm/i915/i915_perf.c (revision fc405d53b73a2d73393cb97f684863d17b583e38)
/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
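
/*
 * For illustration, a hedged sketch of how userspace might open a periodic OA
 * stream with the uapi described above (drm_fd and metrics_set_id are
 * placeholders; the metrics set ID would normally be read from sysfs, and the
 * configuration is passed as the u64 (key, value) property pairs discussed in
 * the next section):
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */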

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we periodically forwarded data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based: the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature: there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality: we can't time-share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently designed assuming the largest 16M size is used, such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
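
/*
 * As a worked example of the wraparound arithmetic: with the 16M buffer,
 * OA_TAKEN(0x40, 0xffffc0) is (0x40 - 0xffffc0) & 0xffffff = 0x80, i.e. the
 * tail has wrapped past the end of the buffer and 128 bytes are available.
 */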

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning the previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read, and the whole buffer is cleared upon OA buffer initialization.
 * The first dword is the reason for the report while the second is the
 * timestamp, making it fairly unlikely for those 2 fields to both be 0. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
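
/*
 * For intuition: a periodic exponent E triggers a report on each rising edge
 * of timestamp bit E, i.e. every 2^(E + 1) timestamp ticks. Assuming a
 * 12.5MHz timestamp clock (as on Haswell), E = 0 samples every 2 ticks =
 * 160ns, while E = OA_EXPONENT_MAX = 31 samples roughly every
 * 2^32 / 12.5e6 ~= 343 seconds.
 */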

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency, e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};
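
/*
 * E.g. with the 256 byte A45_B8_C8 format above, the tail-alignment mask from
 * the XXX note is ~(256 - 1), so a raw HW tail of 0x1234 is rounded down to
 * 0x1200, a whole-report boundary.
 */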

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 * data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

#ifdef notyet
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
#endif

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

#ifdef notyet

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

#endif

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

#ifdef notyet

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: it's safe to read OA config state here unlocked, assuming this is
 * only called while the stream is enabled, during which the global OA
 * configuration can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

#endif

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
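
/*
 * A hedged sketch of the userspace-side counterpart: a read() of the stream
 * fd returns a sequence of header-prefixed records which can be walked as
 * below (process_oa_report() is a hypothetical helper, not part of the uapi):
 *
 *	uint8_t data[4096];
 *	ssize_t len = read(stream_fd, data, sizeof(data));
 *	size_t pos = 0;
 *
 *	while (len > 0 &&
 *	       pos + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(data + pos);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(hdr + 1));
 *		pos += hdr->size;
 *	}
 */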

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that it
		 * can track switches in between MI_REPORT_PERF_COUNT commands
		 * and can itself subtract/ignore the progress of counters
		 * associated with other contexts. Note that the hardware
		 * automatically triggers reports when switching to a new
		 * context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

#ifdef notyet

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
			stream->specific_ctx_id_mask =
				((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
			stream->specific_ctx_id =
				(XEHP_MAX_CONTEXT_HW_ID - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
		} else {
			stream->specific_ctx_id_mask =
				((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
			/*
			 * Pick an unused context id
			 * 0 - BITS_PER_LONG are used by other contexts
			 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
			 */
			stream->specific_ctx_id =
				(GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		}
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	if (WARN_ON(stream != perf->exclusive_stream))
		return;

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

#endif

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = gtt_offset;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1545 	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1546 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1547 
1548 	/* Mark that we need updated tail pointers to read from... */
1549 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1550 	stream->oa_buffer.tail = gtt_offset;
1551 
1552 	/*
1553 	 * Reset state used to recognise context switches, affecting which
1554 	 * reports we will forward to userspace while filtering for a single
1555 	 * context.
1556 	 */
1557 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1558 
1559 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1560 
1561 	/*
1562 	 * NB: although the OA buffer will initially be allocated
1563 	 * zeroed via shmfs (and so this memset is redundant when
1564 	 * first allocating), we may re-init the OA buffer, either
1565 	 * when re-enabling a stream or in error/reset paths.
1566 	 *
1567 	 * The reason we clear the buffer for each re-init is for the
1568 	 * sanity check in gen8_append_oa_reports() that looks at the
1569 	 * reason field to make sure it's non-zero which relies on
1570 	 * the assumption that new reports are being written to zeroed
1571 	 * memory...
1572 	 */
1573 	memset(stream->oa_buffer.vaddr, 0,
1574 	       stream->oa_buffer.vma->size);
1575 }
1576 
1577 #ifdef notyet
1578 
1579 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1580 {
1581 	struct drm_i915_private *i915 = stream->perf->i915;
1582 	struct drm_i915_gem_object *bo;
1583 	struct i915_vma *vma;
1584 	int ret;
1585 
1586 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1587 		return -ENODEV;
1588 
1589 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1590 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1591 
1592 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1593 	if (IS_ERR(bo)) {
1594 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1595 		return PTR_ERR(bo);
1596 	}
1597 
1598 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1599 
1600 	/* PreHSW required 512K alignment, HSW requires 16M */
1601 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1602 	if (IS_ERR(vma)) {
1603 		ret = PTR_ERR(vma);
1604 		goto err_unref;
1605 	}
1606 	stream->oa_buffer.vma = vma;
1607 
1608 	stream->oa_buffer.vaddr =
1609 		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1610 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1611 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1612 		goto err_unpin;
1613 	}
1614 
1615 	return 0;
1616 
1617 err_unpin:
1618 	__i915_vma_unpin(vma);
1619 
1620 err_unref:
1621 	i915_gem_object_put(bo);
1622 
1623 	stream->oa_buffer.vaddr = NULL;
1624 	stream->oa_buffer.vma = NULL;
1625 
1626 	return ret;
1627 }
1628 
1629 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1630 				  bool save, i915_reg_t reg, u32 offset,
1631 				  u32 dword_count)
1632 {
1633 	u32 cmd;
1634 	u32 d;
1635 
1636 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1637 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1638 	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1639 		cmd++;
1640 
1641 	for (d = 0; d < dword_count; d++) {
1642 		*cs++ = cmd;
1643 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1644 		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
1645 						offset) + 4 * d;
1646 		*cs++ = 0;
1647 	}
1648 
1649 	return cs;
1650 }
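
/*
 * A sketch of what save_restore_register() emits per dword of register
 * state, assuming gen8+ (where cmd++ selects the SRM/LRM encoding with
 * a 64bit GGTT address):
 *
 *   cmd                                 MI_STORE/LOAD_REGISTER_MEM
 *   i915_mmio_reg_offset(reg) + 4 * d   register address
 *   scratch offset + 4 * d              GGTT address, low dword
 *   0                                   GGTT address, high dword
 *
 * so saving one 64bit CS_GPR (dword_count == 2) costs 8 dwords.
 */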
1651 
1652 static int alloc_noa_wait(struct i915_perf_stream *stream)
1653 {
1654 	struct drm_i915_private *i915 = stream->perf->i915;
1655 	struct drm_i915_gem_object *bo;
1656 	struct i915_vma *vma;
1657 	const u64 delay_ticks = 0xffffffffffffffff -
1658 		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1659 		atomic64_read(&stream->perf->noa_programming_delay));
1660 	const u32 base = stream->engine->mmio_base;
1661 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1662 	u32 *batch, *ts0, *cs, *jump;
1663 	struct i915_gem_ww_ctx ww;
1664 	int ret, i;
1665 	enum {
1666 		START_TS,
1667 		NOW_TS,
1668 		DELTA_TS,
1669 		JUMP_PREDICATE,
1670 		DELTA_TARGET,
1671 		N_CS_GPR
1672 	};
1673 
1674 	bo = i915_gem_object_create_internal(i915, 4096);
1675 	if (IS_ERR(bo)) {
1676 		drm_err(&i915->drm,
1677 			"Failed to allocate NOA wait batchbuffer\n");
1678 		return PTR_ERR(bo);
1679 	}
1680 
1681 	i915_gem_ww_ctx_init(&ww, true);
1682 retry:
1683 	ret = i915_gem_object_lock(bo, &ww);
1684 	if (ret)
1685 		goto out_ww;
1686 
1687 	/*
1688 	 * We pin in the GGTT because we jump into this buffer: multiple OA
1689 	 * config BOs will contain a jump to this address, so it needs to stay
1690 	 * fixed for the lifetime of the i915 perf stream.
1691 	 */
1692 	vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
1693 	if (IS_ERR(vma)) {
1694 		ret = PTR_ERR(vma);
1695 		goto out_ww;
1696 	}
1697 
1698 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1699 	if (IS_ERR(batch)) {
1700 		ret = PTR_ERR(batch);
1701 		goto err_unpin;
1702 	}
1703 
1704 	/* Save registers. */
1705 	for (i = 0; i < N_CS_GPR; i++)
1706 		cs = save_restore_register(
1707 			stream, cs, true /* save */, CS_GPR(i),
1708 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1709 	cs = save_restore_register(
1710 		stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
1711 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1712 
1713 	/* First timestamp snapshot location. */
1714 	ts0 = cs;
1715 
1716 	/*
1717 	 * Initial snapshot of the timestamp register to implement the wait.
1718 	 * We work with 32b values, so clear out the top 32 bits of the
1719 	 * register because the ALU works on 64-bit values.
1720 	 */
1721 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1722 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1723 	*cs++ = 0;
1724 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1725 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1726 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1727 
1728 	/*
1729 	 * This is the location we're going to jump back into until the
1730 	 * required amount of time has passed.
1731 	 */
1732 	jump = cs;
1733 
1734 	/*
1735 	 * Take another snapshot of the timestamp register. Take care to
1736 	 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
1737 	 * other operations below.
1738 	 */
1739 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1740 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1741 	*cs++ = 0;
1742 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1743 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1744 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1745 
1746 	/*
1747 	 * Do a diff between the two timestamps and store the result in
1748 	 * CS_GPR(DELTA_TS).
1749 	 */
1750 	*cs++ = MI_MATH(5);
1751 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1752 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1753 	*cs++ = MI_MATH_SUB;
1754 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1755 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1756 
1757 	/*
1758 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1759 	 * timestamp has rolled over the 32 bits) into the predicate register
1760 	 * to be used for the predicated jump.
1761 	 */
1762 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1763 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1764 	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
1765 
1766 	/* Restart from the beginning if we had timestamps roll over. */
1767 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1768 		 MI_BATCH_BUFFER_START :
1769 		 MI_BATCH_BUFFER_START_GEN8) |
1770 		MI_BATCH_PREDICATE;
1771 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1772 	*cs++ = 0;
1773 
1774 	/*
1775 	 * Now take the diff between the two previous timestamps and add it to:
1776 	 *      ((1 << 64) - 1) - delay (in timestamp ticks)
1777 	 *
1778 	 * When the Carry Flag contains 1 this means the elapsed time is
1779 	 * longer than the expected delay, and we can exit the wait loop.
1780 	 */
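	/*
	 * Worked example of the overflow trick, with illustrative numbers:
	 * for a delay of 100 ticks, DELTA_TARGET is loaded with
	 * 2^64 - 1 - 100. Once DELTA_TS exceeds 100, the MI_MATH_ADD below
	 * wraps past 2^64 and sets the carry flag; MI_MATH_STOREINV then
	 * writes 0 into JUMP_PREDICATE, the predicated jump is not taken,
	 * and the batch falls through to the register restore and
	 * MI_BATCH_BUFFER_END.
	 */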
1781 	*cs++ = MI_LOAD_REGISTER_IMM(2);
1782 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1783 	*cs++ = lower_32_bits(delay_ticks);
1784 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1785 	*cs++ = upper_32_bits(delay_ticks);
1786 
1787 	*cs++ = MI_MATH(4);
1788 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1789 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1790 	*cs++ = MI_MATH_ADD;
1791 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1792 
1793 	*cs++ = MI_ARB_CHECK;
1794 
1795 	/*
1796 	 * Transfer the result into the predicate register to be used for the
1797 	 * predicated jump.
1798 	 */
1799 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1800 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1801 	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
1802 
1803 	/* Predicate the jump. */
1804 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1805 		 MI_BATCH_BUFFER_START :
1806 		 MI_BATCH_BUFFER_START_GEN8) |
1807 		MI_BATCH_PREDICATE;
1808 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1809 	*cs++ = 0;
1810 
1811 	/* Restore registers. */
1812 	for (i = 0; i < N_CS_GPR; i++)
1813 		cs = save_restore_register(
1814 			stream, cs, false /* restore */, CS_GPR(i),
1815 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1816 	cs = save_restore_register(
1817 		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
1818 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1819 
1820 	/* And return to the ring. */
1821 	*cs++ = MI_BATCH_BUFFER_END;
1822 
1823 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1824 
1825 	i915_gem_object_flush_map(bo);
1826 	__i915_gem_object_release_map(bo);
1827 
1828 	stream->noa_wait = vma;
1829 	goto out_ww;
1830 
1831 err_unpin:
1832 	i915_vma_unpin_and_release(&vma, 0);
1833 out_ww:
1834 	if (ret == -EDEADLK) {
1835 		ret = i915_gem_ww_ctx_backoff(&ww);
1836 		if (!ret)
1837 			goto retry;
1838 	}
1839 	i915_gem_ww_ctx_fini(&ww);
1840 	if (ret)
1841 		i915_gem_object_put(bo);
1842 	return ret;
1843 }
1844 
1845 #endif
1846 
1847 static u32 *write_cs_mi_lri(u32 *cs,
1848 			    const struct i915_oa_reg *reg_data,
1849 			    u32 n_regs)
1850 {
1851 	u32 i;
1852 
1853 	for (i = 0; i < n_regs; i++) {
1854 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1855 			u32 n_lri = min_t(u32,
1856 					  n_regs - i,
1857 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
1858 
1859 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1860 		}
1861 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1862 		*cs++ = reg_data[i].value;
1863 	}
1864 
1865 	return cs;
1866 }
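
/*
 * For illustration, three registers (well under
 * MI_LOAD_REGISTER_IMM_MAX_REGS) pack into a single packet:
 *
 *   MI_LOAD_REGISTER_IMM(3)
 *   offset(reg0), value0
 *   offset(reg1), value1
 *   offset(reg2), value2
 *
 * Larger sets are split into back-to-back packets of at most
 * MI_LOAD_REGISTER_IMM_MAX_REGS registers each.
 */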
1867 
1868 static int num_lri_dwords(int num_regs)
1869 {
1870 	int count = 0;
1871 
1872 	if (num_regs > 0) {
1873 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1874 		count += num_regs * 2;
1875 	}
1876 
1877 	return count;
1878 }
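
/*
 * Worked example, assuming MI_LOAD_REGISTER_IMM_MAX_REGS == 126 as in
 * the command definitions: 200 registers need DIV_ROUND_UP(200, 126)
 * == 2 LRI headers plus 200 * 2 == 400 offset/value dwords, i.e. 402
 * dwords in total.
 */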
1879 
1880 static struct i915_oa_config_bo *
1881 alloc_oa_config_buffer(struct i915_perf_stream *stream,
1882 		       struct i915_oa_config *oa_config)
1883 {
1884 	struct drm_i915_gem_object *obj;
1885 	struct i915_oa_config_bo *oa_bo;
1886 	struct i915_gem_ww_ctx ww;
1887 	size_t config_length = 0;
1888 	u32 *cs;
1889 	int err;
1890 
1891 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1892 	if (!oa_bo)
1893 		return ERR_PTR(-ENOMEM);
1894 
1895 	config_length += num_lri_dwords(oa_config->mux_regs_len);
1896 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1897 	config_length += num_lri_dwords(oa_config->flex_regs_len);
1898 	config_length += 3; /* MI_BATCH_BUFFER_START */
1899 	config_length = roundup2(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1900 
1901 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1902 	if (IS_ERR(obj)) {
1903 		err = PTR_ERR(obj);
1904 		goto err_free;
1905 	}
1906 
1907 	i915_gem_ww_ctx_init(&ww, true);
1908 retry:
1909 	err = i915_gem_object_lock(obj, &ww);
1910 	if (err)
1911 		goto out_ww;
1912 
1913 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1914 	if (IS_ERR(cs)) {
1915 		err = PTR_ERR(cs);
1916 		goto out_ww;
1917 	}
1918 
1919 	cs = write_cs_mi_lri(cs,
1920 			     oa_config->mux_regs,
1921 			     oa_config->mux_regs_len);
1922 	cs = write_cs_mi_lri(cs,
1923 			     oa_config->b_counter_regs,
1924 			     oa_config->b_counter_regs_len);
1925 	cs = write_cs_mi_lri(cs,
1926 			     oa_config->flex_regs,
1927 			     oa_config->flex_regs_len);
1928 
1929 	/* Jump into the active wait. */
1930 	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
1931 		 MI_BATCH_BUFFER_START :
1932 		 MI_BATCH_BUFFER_START_GEN8);
1933 	*cs++ = i915_ggtt_offset(stream->noa_wait);
1934 	*cs++ = 0;
1935 
1936 	i915_gem_object_flush_map(obj);
1937 	__i915_gem_object_release_map(obj);
1938 
1939 	oa_bo->vma = i915_vma_instance(obj,
1940 				       &stream->engine->gt->ggtt->vm,
1941 				       NULL);
1942 	if (IS_ERR(oa_bo->vma)) {
1943 		err = PTR_ERR(oa_bo->vma);
1944 		goto out_ww;
1945 	}
1946 
1947 	oa_bo->oa_config = i915_oa_config_get(oa_config);
1948 	llist_add(&oa_bo->node, &stream->oa_config_bos);
1949 
1950 out_ww:
1951 	if (err == -EDEADLK) {
1952 		err = i915_gem_ww_ctx_backoff(&ww);
1953 		if (!err)
1954 			goto retry;
1955 	}
1956 	i915_gem_ww_ctx_fini(&ww);
1957 
1958 	if (err)
1959 		i915_gem_object_put(obj);
1960 err_free:
1961 	if (err) {
1962 		kfree(oa_bo);
1963 		return ERR_PTR(err);
1964 	}
1965 	return oa_bo;
1966 }
1967 
1968 static struct i915_vma *
1969 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1970 {
1971 	struct i915_oa_config_bo *oa_bo;
1972 
1973 	/*
1974 	 * Look for the buffer in the already allocated BOs attached
1975 	 * to the stream.
1976 	 */
1977 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1978 		if (oa_bo->oa_config == oa_config &&
1979 		    memcmp(oa_bo->oa_config->uuid,
1980 			   oa_config->uuid,
1981 			   sizeof(oa_config->uuid)) == 0)
1982 			goto out;
1983 	}
1984 
1985 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
1986 	if (IS_ERR(oa_bo))
1987 		return ERR_CAST(oa_bo);
1988 
1989 out:
1990 	return i915_vma_get(oa_bo->vma);
1991 }
1992 
1993 static int
1994 emit_oa_config(struct i915_perf_stream *stream,
1995 	       struct i915_oa_config *oa_config,
1996 	       struct intel_context *ce,
1997 	       struct i915_active *active)
1998 {
1999 	struct i915_request *rq;
2000 	struct i915_vma *vma;
2001 	struct i915_gem_ww_ctx ww;
2002 	int err;
2003 
2004 	vma = get_oa_vma(stream, oa_config);
2005 	if (IS_ERR(vma))
2006 		return PTR_ERR(vma);
2007 
2008 	i915_gem_ww_ctx_init(&ww, true);
2009 retry:
2010 	err = i915_gem_object_lock(vma->obj, &ww);
2011 	if (err)
2012 		goto err;
2013 
2014 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2015 	if (err)
2016 		goto err;
2017 
2018 	intel_engine_pm_get(ce->engine);
2019 	rq = i915_request_create(ce);
2020 	intel_engine_pm_put(ce->engine);
2021 	if (IS_ERR(rq)) {
2022 		err = PTR_ERR(rq);
2023 		goto err_vma_unpin;
2024 	}
2025 
2026 	if (!IS_ERR_OR_NULL(active)) {
2027 		/* After all individual context modifications */
2028 		err = i915_request_await_active(rq, active,
2029 						I915_ACTIVE_AWAIT_ACTIVE);
2030 		if (err)
2031 			goto err_add_request;
2032 
2033 		err = i915_active_add_request(active, rq);
2034 		if (err)
2035 			goto err_add_request;
2036 	}
2037 
2038 	err = i915_request_await_object(rq, vma->obj, 0);
2039 	if (!err)
2040 		err = i915_vma_move_to_active(vma, rq, 0);
2041 	if (err)
2042 		goto err_add_request;
2043 
2044 	err = rq->engine->emit_bb_start(rq,
2045 					vma->node.start, 0,
2046 					I915_DISPATCH_SECURE);
2047 	if (err)
2048 		goto err_add_request;
2049 
2050 err_add_request:
2051 	i915_request_add(rq);
2052 err_vma_unpin:
2053 	i915_vma_unpin(vma);
2054 err:
2055 	if (err == -EDEADLK) {
2056 		err = i915_gem_ww_ctx_backoff(&ww);
2057 		if (!err)
2058 			goto retry;
2059 	}
2060 
2061 	i915_gem_ww_ctx_fini(&ww);
2062 	i915_vma_put(vma);
2063 	return err;
2064 }
2065 
2066 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2067 {
2068 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2069 }
2070 
2071 static int
2072 hsw_enable_metric_set(struct i915_perf_stream *stream,
2073 		      struct i915_active *active)
2074 {
2075 	struct intel_uncore *uncore = stream->uncore;
2076 
2077 	/*
2078 	 * PRM:
2079 	 *
2080 	 * OA unit is using “crclk” for its functionality. When trunk
2081 	 * level clock gating takes place, OA clock would be gated,
2082 	 * unable to count the events from non-render clock domain.
2083 	 * Render clock gating must be disabled when OA is enabled to
2084 	 * count the events from non-render domain. Unit level clock
2085 	 * gating for RCS should also be disabled.
2086 	 */
2087 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2088 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2089 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2090 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2091 
2092 	return emit_oa_config(stream,
2093 			      stream->oa_config, oa_context(stream),
2094 			      active);
2095 }
2096 
2097 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2098 {
2099 	struct intel_uncore *uncore = stream->uncore;
2100 
2101 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2102 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2103 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2104 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2105 
2106 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2107 }
2108 
2109 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2110 			      i915_reg_t reg)
2111 {
2112 	u32 mmio = i915_mmio_reg_offset(reg);
2113 	int i;
2114 
2115 	/*
2116 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2117 	 * Active' event. In the future it's anticipated that there
2118 	 * will be an explicit 'No Event' we can select, but not yet...
2119 	 */
2120 	if (!oa_config)
2121 		return 0;
2122 
2123 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2124 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2125 			return oa_config->flex_regs[i].value;
2126 	}
2127 
2128 	return 0;
2129 }
2130 /*
2131  * NB: It must always remain pointer safe to run this even if the OA unit
2132  * has been disabled.
2133  *
2134  * It's fine to put out-of-date values into these per-context registers
2135  * in the case that the OA unit has been disabled.
2136  */
2137 static void
2138 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2139 			       const struct i915_perf_stream *stream)
2140 {
2141 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2142 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2143 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2144 	static const i915_reg_t flex_regs[] = {
2145 		EU_PERF_CNTL0,
2146 		EU_PERF_CNTL1,
2147 		EU_PERF_CNTL2,
2148 		EU_PERF_CNTL3,
2149 		EU_PERF_CNTL4,
2150 		EU_PERF_CNTL5,
2151 		EU_PERF_CNTL6,
2152 	};
2153 	u32 *reg_state = ce->lrc_reg_state;
2154 	int i;
2155 
2156 	reg_state[ctx_oactxctrl + 1] =
2157 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2158 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2159 		GEN8_OA_COUNTER_RESUME;
2160 
2161 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2162 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2163 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2164 }
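
/*
 * The "+ 1" offsets above follow from the LRC register state layout:
 * the context image stores MI_LRI-style (offset, value) pairs, so the
 * value dword for the register at image offset ctx_oactxctrl lives at
 * ctx_oactxctrl + 1, and the Nth flex EU value at
 * ctx_flexeu0 + 2 * N + 1.
 */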
2165 
2166 struct flex {
2167 	i915_reg_t reg;
2168 	u32 offset;
2169 	u32 value;
2170 };
2171 
2172 static int
2173 gen8_store_flex(struct i915_request *rq,
2174 		struct intel_context *ce,
2175 		const struct flex *flex, unsigned int count)
2176 {
2177 	u32 offset;
2178 	u32 *cs;
2179 
2180 	cs = intel_ring_begin(rq, 4 * count);
2181 	if (IS_ERR(cs))
2182 		return PTR_ERR(cs);
2183 
2184 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2185 	do {
2186 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2187 		*cs++ = offset + flex->offset * sizeof(u32);
2188 		*cs++ = 0;
2189 		*cs++ = flex->value;
2190 	} while (flex++, --count);
2191 
2192 	intel_ring_advance(rq, cs);
2193 
2194 	return 0;
2195 }
2196 
2197 static int
2198 gen8_load_flex(struct i915_request *rq,
2199 	       struct intel_context *ce,
2200 	       const struct flex *flex, unsigned int count)
2201 {
2202 	u32 *cs;
2203 
2204 	GEM_BUG_ON(!count || count > 63);
2205 
2206 	cs = intel_ring_begin(rq, 2 * count + 2);
2207 	if (IS_ERR(cs))
2208 		return PTR_ERR(cs);
2209 
2210 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2211 	do {
2212 		*cs++ = i915_mmio_reg_offset(flex->reg);
2213 		*cs++ = flex->value;
2214 	} while (flex++, --count);
2215 	*cs++ = MI_NOOP;
2216 
2217 	intel_ring_advance(rq, cs);
2218 
2219 	return 0;
2220 }
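
/*
 * Note the split between the two helpers above: gen8_store_flex()
 * patches the saved context image in memory (MI_STORE_DWORD_IMM into
 * the LRC state page), so the values take effect when the target
 * context is next restored, whereas gen8_load_flex() pokes the live
 * registers directly with an LRI issued from the context's own ring.
 */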
2221 
2222 static int gen8_modify_context(struct intel_context *ce,
2223 			       const struct flex *flex, unsigned int count)
2224 {
2225 	struct i915_request *rq;
2226 	int err;
2227 
2228 	rq = intel_engine_create_kernel_request(ce->engine);
2229 	if (IS_ERR(rq))
2230 		return PTR_ERR(rq);
2231 
2232 	/* Serialise with the remote context */
2233 	err = intel_context_prepare_remote_request(ce, rq);
2234 	if (err == 0)
2235 		err = gen8_store_flex(rq, ce, flex, count);
2236 
2237 	i915_request_add(rq);
2238 	return err;
2239 }
2240 
2241 static int
2242 gen8_modify_self(struct intel_context *ce,
2243 		 const struct flex *flex, unsigned int count,
2244 		 struct i915_active *active)
2245 {
2246 	struct i915_request *rq;
2247 	int err;
2248 
2249 	intel_engine_pm_get(ce->engine);
2250 	rq = i915_request_create(ce);
2251 	intel_engine_pm_put(ce->engine);
2252 	if (IS_ERR(rq))
2253 		return PTR_ERR(rq);
2254 
2255 	if (!IS_ERR_OR_NULL(active)) {
2256 		err = i915_active_add_request(active, rq);
2257 		if (err)
2258 			goto err_add_request;
2259 	}
2260 
2261 	err = gen8_load_flex(rq, ce, flex, count);
2262 	if (err)
2263 		goto err_add_request;
2264 
2265 err_add_request:
2266 	i915_request_add(rq);
2267 	return err;
2268 }
2269 
2270 static int gen8_configure_context(struct i915_gem_context *ctx,
2271 				  struct flex *flex, unsigned int count)
2272 {
2273 	struct i915_gem_engines_iter it;
2274 	struct intel_context *ce;
2275 	int err = 0;
2276 
2277 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2278 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2279 
2280 		if (ce->engine->class != RENDER_CLASS)
2281 			continue;
2282 
2283 		/* Otherwise OA settings will be set upon first use */
2284 		if (!intel_context_pin_if_active(ce))
2285 			continue;
2286 
2287 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2288 		err = gen8_modify_context(ce, flex, count);
2289 
2290 		intel_context_unpin(ce);
2291 		if (err)
2292 			break;
2293 	}
2294 	i915_gem_context_unlock_engines(ctx);
2295 
2296 	return err;
2297 }
2298 
2299 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2300 				       struct i915_active *active)
2301 {
2302 	int err;
2303 	struct intel_context *ce = stream->pinned_ctx;
2304 	u32 format = stream->oa_buffer.format;
2305 	struct flex regs_context[] = {
2306 		{
2307 			GEN8_OACTXCONTROL,
2308 			stream->perf->ctx_oactxctrl_offset + 1,
2309 			active ? GEN8_OA_COUNTER_RESUME : 0,
2310 		},
2311 	};
2312 	/* Offsets in regs_lri are not used since this configuration is only
2313 	 * applied using LRI. Initialize the correct offsets for posterity.
2314 	 */
2315 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2316 	struct flex regs_lri[] = {
2317 		{
2318 			GEN12_OAR_OACONTROL,
2319 			GEN12_OAR_OACONTROL_OFFSET + 1,
2320 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2321 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2322 		},
2323 		{
2324 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2325 			CTX_CONTEXT_CONTROL,
2326 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2327 				      active ?
2328 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2329 				      0)
2330 		},
2331 	};
2332 
2333 	/* Modify the context image of pinned context with regs_context */
2334 	err = intel_context_lock_pinned(ce);
2335 	if (err)
2336 		return err;
2337 
2338 	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2339 	intel_context_unlock_pinned(ce);
2340 	if (err)
2341 		return err;
2342 
2343 	/* Apply regs_lri using LRI with pinned context */
2344 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2345 }
2346 
2347 /*
2348  * Manages updating the per-context aspects of the OA stream
2349  * configuration across all contexts.
2350  *
2351  * The awkward consideration here is that OACTXCONTROL controls the
2352  * exponent for periodic sampling which is primarily used for system
2353  * wide profiling where we'd like a consistent sampling period even in
2354  * the face of context switches.
2355  *
2356  * Our approach of updating the register state context (as opposed to
2357  * say using a workaround batch buffer) ensures that the hardware
2358  * won't automatically reload an out-of-date timer exponent even
2359  * transiently before a WA BB could be parsed.
2360  *
2361  * This function needs to:
2362  * - Ensure the currently running context's per-context OA state is
2363  *   updated
2364  * - Ensure that all existing contexts will have the correct per-context
2365  *   OA state if they are scheduled for use.
2366  * - Ensure any new contexts will be initialized with the correct
2367  *   per-context OA state.
2368  *
2369  * Note: it's only the RCS/Render context that has any OA state.
2370  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2371  */
2372 static int
2373 oa_configure_all_contexts(struct i915_perf_stream *stream,
2374 			  struct flex *regs,
2375 			  size_t num_regs,
2376 			  struct i915_active *active)
2377 {
2378 	struct drm_i915_private *i915 = stream->perf->i915;
2379 	struct intel_engine_cs *engine;
2380 	struct i915_gem_context *ctx, *cn;
2381 	int err;
2382 
2383 	lockdep_assert_held(&stream->perf->lock);
2384 
2385 	/*
2386 	 * The OA register config is set up through the context image. This image
2387 	 * might be written to by the GPU on context switch (in particular on
2388 	 * lite-restore). This means we can't safely update a context's image,
2389 	 * if this context is scheduled/submitted to run on the GPU.
2390 	 *
2391 	 * We could emit the OA register config through the batch buffer but
2392 	 * this might leave a small interval of time where the OA unit is
2393 	 * configured at an invalid sampling period.
2394 	 *
2395 	 * Note that since we emit all requests from a single ring, there
2396 	 * is still an implicit global barrier here that may cause a high
2397 	 * priority context to wait for an otherwise independent low priority
2398 	 * context. Contexts idle at the time of reconfiguration are not
2399 	 * trapped behind the barrier.
2400 	 */
2401 	spin_lock(&i915->gem.contexts.lock);
2402 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2403 		if (!kref_get_unless_zero(&ctx->ref))
2404 			continue;
2405 
2406 		spin_unlock(&i915->gem.contexts.lock);
2407 
2408 		err = gen8_configure_context(ctx, regs, num_regs);
2409 		if (err) {
2410 			i915_gem_context_put(ctx);
2411 			return err;
2412 		}
2413 
2414 		spin_lock(&i915->gem.contexts.lock);
2415 		list_safe_reset_next(ctx, cn, link);
2416 		i915_gem_context_put(ctx);
2417 	}
2418 	spin_unlock(&i915->gem.contexts.lock);
2419 
2420 	/*
2421 	 * After updating all other contexts, we need to modify ourselves.
2422 	 * If we don't modify the kernel_context, we do not get events while
2423 	 * idle.
2424 	 */
2425 	for_each_uabi_engine(engine, i915) {
2426 		struct intel_context *ce = engine->kernel_context;
2427 
2428 		if (engine->class != RENDER_CLASS)
2429 			continue;
2430 
2431 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2432 
2433 		err = gen8_modify_self(ce, regs, num_regs, active);
2434 		if (err)
2435 			return err;
2436 	}
2437 
2438 	return 0;
2439 }
2440 
2441 static int
2442 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2443 			     const struct i915_oa_config *oa_config,
2444 			     struct i915_active *active)
2445 {
2446 	struct flex regs[] = {
2447 		{
2448 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2449 			CTX_R_PWR_CLK_STATE,
2450 		},
2451 	};
2452 
2453 	return oa_configure_all_contexts(stream,
2454 					 regs, ARRAY_SIZE(regs),
2455 					 active);
2456 }
2457 
2458 static int
2459 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2460 			   const struct i915_oa_config *oa_config,
2461 			   struct i915_active *active)
2462 {
2463 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2464 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2465 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2466 	struct flex regs[] = {
2467 		{
2468 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2469 			CTX_R_PWR_CLK_STATE,
2470 		},
2471 		{
2472 			GEN8_OACTXCONTROL,
2473 			stream->perf->ctx_oactxctrl_offset + 1,
2474 		},
2475 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2476 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2477 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2478 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2479 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2480 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2481 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2482 	};
2483 #undef ctx_flexeuN
2484 	int i;
2485 
2486 	regs[1].value =
2487 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2488 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2489 		GEN8_OA_COUNTER_RESUME;
2490 
2491 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2492 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2493 
2494 	return oa_configure_all_contexts(stream,
2495 					 regs, ARRAY_SIZE(regs),
2496 					 active);
2497 }
2498 
2499 static int
2500 gen8_enable_metric_set(struct i915_perf_stream *stream,
2501 		       struct i915_active *active)
2502 {
2503 	struct intel_uncore *uncore = stream->uncore;
2504 	struct i915_oa_config *oa_config = stream->oa_config;
2505 	int ret;
2506 
2507 	/*
2508 	 * We disable slice/unslice clock ratio change reports on SKL since
2509 	 * they are too noisy. The HW generates a lot of redundant reports
2510 	 * where the ratio hasn't really changed, causing a lot of redundant
2511 	 * work for userspace and increasing the chances we'll hit buffer
2512 	 * overruns.
2513 	 *
2514 	 * Although we don't currently use the 'disable overrun' OABUFFER
2515 	 * feature it's worth noting that clock ratio reports have to be
2516 	 * disabled before considering use of that feature since the HW doesn't
2517 	 * correctly block these reports.
2518 	 *
2519 	 * Currently none of the high-level metrics we have depend on knowing
2520 	 * this ratio to normalize.
2521 	 *
2522 	 * Note: This register is not power context saved and restored, but
2523 	 * that's OK considering that we disable RC6 while the OA unit is
2524 	 * enabled.
2525 	 *
2526 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2527 	 * be read back from automatically triggered reports, as part of the
2528 	 * RPT_ID field.
2529 	 */
2530 	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2531 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2532 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2533 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2534 	}
2535 
2536 	/*
2537 	 * Update all contexts prior to writing the mux configurations as we need
2538 	 * to make sure all slices/subslices are ON before writing to NOA
2539 	 * registers.
2540 	 */
2541 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2542 	if (ret)
2543 		return ret;
2544 
2545 	return emit_oa_config(stream,
2546 			      stream->oa_config, oa_context(stream),
2547 			      active);
2548 }
2549 
2550 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2551 {
2552 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2553 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2554 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2555 }
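
/*
 * GEN12_OAG_OA_DEBUG is a masked register: _MASKED_FIELD() places the
 * write-enable mask in the upper 16 bits and the payload in the lower
 * 16, so the helper above either sets or clears the
 * DISABLE_CTX_SWITCH_REPORTS bit without disturbing the other bits.
 */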
2556 
2557 static int
2558 gen12_enable_metric_set(struct i915_perf_stream *stream,
2559 			struct i915_active *active)
2560 {
2561 	struct intel_uncore *uncore = stream->uncore;
2562 	struct i915_oa_config *oa_config = stream->oa_config;
2563 	bool periodic = stream->periodic;
2564 	u32 period_exponent = stream->period_exponent;
2565 	int ret;
2566 
2567 	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2568 			   /* Disable clk ratio reports, like previous Gens. */
2569 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2570 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2571 			   /*
2572 			    * If the user didn't require OA reports, instruct
2573 			    * the hardware not to emit ctx switch reports.
2574 			    */
2575 			   oag_report_ctx_switches(stream));
2576 
2577 	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2578 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2579 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2580 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2581 			    : 0);
2582 
2583 	/*
2584 	 * Update all contexts prior to writing the mux configurations as we need
2585 	 * to make sure all slices/subslices are ON before writing to NOA
2586 	 * registers.
2587 	 */
2588 	ret = gen12_configure_all_contexts(stream, oa_config, active);
2589 	if (ret)
2590 		return ret;
2591 
2592 	/*
2593 	 * For Gen12, performance counters are context
2594 	 * saved/restored. Only enable it for the context that
2595 	 * requested this.
2596 	 */
2597 	if (stream->ctx) {
2598 		ret = gen12_configure_oar_context(stream, active);
2599 		if (ret)
2600 			return ret;
2601 	}
2602 
2603 	return emit_oa_config(stream,
2604 			      stream->oa_config, oa_context(stream),
2605 			      active);
2606 }
2607 
2608 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2609 {
2610 	struct intel_uncore *uncore = stream->uncore;
2611 
2612 	/* Reset all contexts' slices/subslices configurations. */
2613 	lrc_configure_all_contexts(stream, NULL, NULL);
2614 
2615 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2616 }
2617 
2618 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2619 {
2620 	struct intel_uncore *uncore = stream->uncore;
2621 
2622 	/* Reset all contexts' slices/subslices configurations. */
2623 	lrc_configure_all_contexts(stream, NULL, NULL);
2624 
2625 	/* Make sure we disable noa to save power. */
2626 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2627 }
2628 
2629 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2630 {
2631 	struct intel_uncore *uncore = stream->uncore;
2632 
2633 	/* Reset all contexts' slices/subslices configurations. */
2634 	gen12_configure_all_contexts(stream, NULL, NULL);
2635 
2636 	/* disable the context save/restore or OAR counters */
2637 	if (stream->ctx)
2638 		gen12_configure_oar_context(stream, NULL);
2639 
2640 	/* Make sure we disable noa to save power. */
2641 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2642 }
2643 
2644 static void gen7_oa_enable(struct i915_perf_stream *stream)
2645 {
2646 	struct intel_uncore *uncore = stream->uncore;
2647 	struct i915_gem_context *ctx = stream->ctx;
2648 	u32 ctx_id = stream->specific_ctx_id;
2649 	bool periodic = stream->periodic;
2650 	u32 period_exponent = stream->period_exponent;
2651 	u32 report_format = stream->oa_buffer.format;
2652 
2653 	/*
2654 	 * Reset buf pointers so we don't forward reports from before now.
2655 	 *
2656 	 * Think carefully if considering trying to avoid this, since it
2657 	 * also ensures status flags and the buffer itself are cleared
2658 	 * in error paths, and we have checks for invalid reports based
2659 	 * on the assumption that certain fields are written to zeroed
2660 	 * memory, which this helps maintain.
2661 	 */
2662 	gen7_init_oa_buffer(stream);
2663 
2664 	intel_uncore_write(uncore, GEN7_OACONTROL,
2665 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2666 			   (period_exponent <<
2667 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2668 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2669 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2670 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2671 			   GEN7_OACONTROL_ENABLE);
2672 }
2673 
2674 static void gen8_oa_enable(struct i915_perf_stream *stream)
2675 {
2676 	struct intel_uncore *uncore = stream->uncore;
2677 	u32 report_format = stream->oa_buffer.format;
2678 
2679 	/*
2680 	 * Reset buf pointers so we don't forward reports from before now.
2681 	 *
2682 	 * Think carefully if considering trying to avoid this, since it
2683 	 * also ensures status flags and the buffer itself are cleared
2684 	 * in error paths, and we have checks for invalid reports based
2685 	 * on the assumption that certain fields are written to zeroed
2686 	 * memory, which this helps maintain.
2687 	 */
2688 	gen8_init_oa_buffer(stream);
2689 
2690 	/*
2691 	 * Note: we don't rely on the hardware to perform single context
2692 	 * filtering and instead filter on the cpu based on the context-id
2693 	 * field of reports.
2694 	 */
2695 	intel_uncore_write(uncore, GEN8_OACONTROL,
2696 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2697 			   GEN8_OA_COUNTER_ENABLE);
2698 }
2699 
2700 static void gen12_oa_enable(struct i915_perf_stream *stream)
2701 {
2702 	struct intel_uncore *uncore = stream->uncore;
2703 	u32 report_format = stream->oa_buffer.format;
2704 
2705 	/*
2706 	 * If we don't want OA reports from the OA buffer, then we don't even
2707 	 * need to program the OAG unit.
2708 	 */
2709 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2710 		return;
2711 
2712 	gen12_init_oa_buffer(stream);
2713 
2714 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2715 			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2716 			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2717 }
2718 
2719 #ifdef notyet
2720 
2721 /**
2722  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2723  * @stream: An i915 perf stream opened for OA metrics
2724  *
2725  * [Re]enables hardware periodic sampling according to the period configured
2726  * when opening the stream. This also starts an hrtimer that will periodically
2727  * check for data in the circular OA buffer for notifying userspace (e.g.
2728  * during a read() or poll()).
2729  */
2730 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2731 {
2732 	stream->pollin = false;
2733 
2734 	stream->perf->ops.oa_enable(stream);
2735 
2736 	if (stream->sample_flags & SAMPLE_OA_REPORT)
2737 		hrtimer_start(&stream->poll_check_timer,
2738 			      ns_to_ktime(stream->poll_oa_period),
2739 			      HRTIMER_MODE_REL_PINNED);
2740 }
2741 
2742 #endif
2743 
2744 static void gen7_oa_disable(struct i915_perf_stream *stream)
2745 {
2746 	struct intel_uncore *uncore = stream->uncore;
2747 
2748 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2749 	if (intel_wait_for_register(uncore,
2750 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2751 				    50))
2752 		drm_err(&stream->perf->i915->drm,
2753 			"wait for OA to be disabled timed out\n");
2754 }
2755 
2756 static void gen8_oa_disable(struct i915_perf_stream *stream)
2757 {
2758 	struct intel_uncore *uncore = stream->uncore;
2759 
2760 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2761 	if (intel_wait_for_register(uncore,
2762 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2763 				    50))
2764 		drm_err(&stream->perf->i915->drm,
2765 			"wait for OA to be disabled timed out\n");
2766 }
2767 
2768 static void gen12_oa_disable(struct i915_perf_stream *stream)
2769 {
2770 	struct intel_uncore *uncore = stream->uncore;
2771 
2772 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2773 	if (intel_wait_for_register(uncore,
2774 				    GEN12_OAG_OACONTROL,
2775 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2776 				    50))
2777 		drm_err(&stream->perf->i915->drm,
2778 			"wait for OA to be disabled timed out\n");
2779 
2780 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
2781 	if (intel_wait_for_register(uncore,
2782 				    GEN12_OA_TLB_INV_CR,
2783 				    1, 0,
2784 				    50))
2785 		drm_err(&stream->perf->i915->drm,
2786 			"wait for OA tlb invalidate timed out\n");
2787 }
2788 
2789 #ifdef notyet
2790 
2791 /**
2792  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2793  * @stream: An i915 perf stream opened for OA metrics
2794  *
2795  * Stops the OA unit from periodically writing counter reports into the
2796  * circular OA buffer. This also stops the hrtimer that periodically checks for
2797  * data in the circular OA buffer, for notifying userspace.
2798  */
2799 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2800 {
2801 	stream->perf->ops.oa_disable(stream);
2802 
2803 	if (stream->sample_flags & SAMPLE_OA_REPORT)
2804 		hrtimer_cancel(&stream->poll_check_timer);
2805 }
2806 
2807 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2808 	.destroy = i915_oa_stream_destroy,
2809 	.enable = i915_oa_stream_enable,
2810 	.disable = i915_oa_stream_disable,
2811 	.wait_unlocked = i915_oa_wait_unlocked,
2812 	.poll_wait = i915_oa_poll_wait,
2813 	.read = i915_oa_read,
2814 };
2815 
2816 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2817 {
2818 	struct i915_active *active;
2819 	int err;
2820 
2821 	active = i915_active_create();
2822 	if (!active)
2823 		return -ENOMEM;
2824 
2825 	err = stream->perf->ops.enable_metric_set(stream, active);
2826 	if (err == 0)
2827 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
2828 
2829 	i915_active_put(active);
2830 	return err;
2831 }
2832 
2833 static void
2834 get_default_sseu_config(struct intel_sseu *out_sseu,
2835 			struct intel_engine_cs *engine)
2836 {
2837 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
2838 
2839 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
2840 
2841 	if (GRAPHICS_VER(engine->i915) == 11) {
2842 		/*
2843 		 * We only need the subslice count so it doesn't matter which
2844 		 * ones we select - just select the lowest half of all
2845 		 * available subslices per slice.
2846 		 */
2847 		out_sseu->subslice_mask =
2848 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
2849 		out_sseu->slice_mask = 0x1;
2850 	}
2851 }
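
/*
 * Worked example for the gen11 special case: with 8 subslices
 * available per slice, hweight8() returns 8, so the mask becomes
 * ~(~0 << 4) == 0xf - the four lowest subslices stay selected on a
 * single slice.
 */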
2852 
2853 #endif
2854 
2855 static int
2856 get_sseu_config(struct intel_sseu *out_sseu,
2857 		struct intel_engine_cs *engine,
2858 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
2859 {
2860 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
2861 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
2862 		return -EINVAL;
2863 
2864 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
2865 }
2866 
2867 #ifdef notyet
2868 
2869 /**
2870  * i915_oa_stream_init - validate combined props for OA stream and init
2871  * @stream: An i915 perf stream
2872  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2873  * @props: The property state that configures stream (individually validated)
2874  *
2875  * While read_properties_unlocked() validates properties in isolation it
2876  * doesn't ensure that the combination necessarily makes sense.
2877  *
2878  * At this point it has been determined that userspace wants a stream of
2879  * OA metrics, but we still need to further validate that the combined
2880  * properties are OK.
2881  *
2882  * If the configuration makes sense then we can allocate memory for
2883  * a circular OA buffer and apply the requested metric set configuration.
2884  *
2885  * Returns: zero on success or a negative error code.
2886  */
2887 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2888 			       struct drm_i915_perf_open_param *param,
2889 			       struct perf_open_properties *props)
2890 {
2891 	struct drm_i915_private *i915 = stream->perf->i915;
2892 	struct i915_perf *perf = stream->perf;
2893 	int format_size;
2894 	int ret;
2895 
2896 	if (!props->engine) {
2897 		drm_dbg(&stream->perf->i915->drm,
2898 			"OA engine not specified\n");
2899 		return -EINVAL;
2900 	}
2901 
2902 	/*
2903 	 * If the sysfs metrics/ directory wasn't registered for some
2904 	 * reason then don't let userspace try their luck with config
2905 	 * IDs
2906 	 */
2907 	if (!perf->metrics_kobj) {
2908 		drm_dbg(&stream->perf->i915->drm,
2909 			"OA metrics weren't advertised via sysfs\n");
2910 		return -EINVAL;
2911 	}
2912 
2913 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2914 	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
2915 		drm_dbg(&stream->perf->i915->drm,
2916 			"Only OA report sampling supported\n");
2917 		return -EINVAL;
2918 	}
2919 
2920 	if (!perf->ops.enable_metric_set) {
2921 		drm_dbg(&stream->perf->i915->drm,
2922 			"OA unit not supported\n");
2923 		return -ENODEV;
2924 	}
2925 
2926 	/*
2927 	 * To avoid the complexity of having to accurately filter
2928 	 * counter reports and marshal to the appropriate client
2929 	 * we currently only allow exclusive access
2930 	 */
2931 	if (perf->exclusive_stream) {
2932 		drm_dbg(&stream->perf->i915->drm,
2933 			"OA unit already in use\n");
2934 		return -EBUSY;
2935 	}
2936 
2937 	if (!props->oa_format) {
2938 		drm_dbg(&stream->perf->i915->drm,
2939 			"OA report format not specified\n");
2940 		return -EINVAL;
2941 	}
2942 
2943 	stream->engine = props->engine;
2944 	stream->uncore = stream->engine->gt->uncore;
2945 
2946 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2947 
2948 	format_size = perf->oa_formats[props->oa_format].size;
2949 
2950 	stream->sample_flags = props->sample_flags;
2951 	stream->sample_size += format_size;
2952 
2953 	stream->oa_buffer.format_size = format_size;
2954 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2955 		return -EINVAL;
2956 
2957 	stream->hold_preemption = props->hold_preemption;
2958 
2959 	stream->oa_buffer.format =
2960 		perf->oa_formats[props->oa_format].format;
2961 
2962 	stream->periodic = props->oa_periodic;
2963 	if (stream->periodic)
2964 		stream->period_exponent = props->oa_period_exponent;
2965 
2966 	if (stream->ctx) {
2967 		ret = oa_get_render_ctx_id(stream);
2968 		if (ret) {
2969 			drm_dbg(&stream->perf->i915->drm,
2970 				"Invalid context id to filter with\n");
2971 			return ret;
2972 		}
2973 	}
2974 
2975 	ret = alloc_noa_wait(stream);
2976 	if (ret) {
2977 		drm_dbg(&stream->perf->i915->drm,
2978 			"Unable to allocate NOA wait batch buffer\n");
2979 		goto err_noa_wait_alloc;
2980 	}
2981 
2982 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2983 	if (!stream->oa_config) {
2984 		drm_dbg(&stream->perf->i915->drm,
2985 			"Invalid OA config id=%i\n", props->metrics_set);
2986 		ret = -EINVAL;
2987 		goto err_config;
2988 	}
2989 
2990 	/* PRM - observability performance counters:
2991 	 *
2992 	 *   OACONTROL, performance counter enable, note:
2993 	 *
2994 	 *   "When this bit is set, in order to have coherent counts,
2995 	 *   RC6 power state and trunk clock gating must be disabled.
2996 	 *   This can be achieved by programming MMIO registers as
2997 	 *   0xA094=0 and 0xA090[31]=1"
2998 	 *
2999 	 *   In our case we are expecting that taking pm + FORCEWAKE
3000 	 *   references will effectively disable RC6.
3001 	 */
3002 	intel_engine_pm_get(stream->engine);
3003 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
3004 
3005 	ret = alloc_oa_buffer(stream);
3006 	if (ret)
3007 		goto err_oa_buf_alloc;
3008 
3009 	stream->ops = &i915_oa_stream_ops;
3010 
3011 	perf->sseu = props->sseu;
3012 	WRITE_ONCE(perf->exclusive_stream, stream);
3013 
3014 	ret = i915_perf_stream_enable_sync(stream);
3015 	if (ret) {
3016 		drm_dbg(&stream->perf->i915->drm,
3017 			"Unable to enable metric set\n");
3018 		goto err_enable;
3019 	}
3020 
3021 	drm_dbg(&stream->perf->i915->drm,
3022 		"opening stream oa config uuid=%s\n",
3023 		stream->oa_config->uuid);
3024 
3025 	hrtimer_init(&stream->poll_check_timer,
3026 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3027 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
3028 	init_waitqueue_head(&stream->poll_wq);
3029 	mtx_init(&stream->oa_buffer.ptr_lock, IPL_TTY);
3030 
3031 	return 0;
3032 
3033 err_enable:
3034 	WRITE_ONCE(perf->exclusive_stream, NULL);
3035 	perf->ops.disable_metric_set(stream);
3036 
3037 	free_oa_buffer(stream);
3038 
3039 err_oa_buf_alloc:
3040 	free_oa_configs(stream);
3041 
3042 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3043 	intel_engine_pm_put(stream->engine);
3044 
3045 err_config:
3046 	free_noa_wait(stream);
3047 
3048 err_noa_wait_alloc:
3049 	if (stream->ctx)
3050 		oa_put_render_ctx_id(stream);
3051 
3052 	return ret;
3053 }
3054 
3055 #endif
3056 
3057 void i915_oa_init_reg_state(const struct intel_context *ce,
3058 			    const struct intel_engine_cs *engine)
3059 {
3060 	struct i915_perf_stream *stream;
3061 
3062 	if (engine->class != RENDER_CLASS)
3063 		return;
3064 
3065 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3066 	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
3067 	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3068 		gen8_update_reg_state_unlocked(ce, stream);
3069 }
3070 
3071 #ifdef notyet
3072 
3073 /**
3074  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3075  * @file: An i915 perf stream file
3076  * @buf: destination buffer given by userspace
3077  * @count: the number of bytes userspace wants to read
3078  * @ppos: (inout) file seek position (unused)
3079  *
3080  * The entry point for handling a read() on a stream file descriptor from
3081  * userspace. Most of the work is left to the i915_perf_read_locked() and
3082  * &i915_perf_stream_ops->read but to save having stream implementations (of
3083  * which we might have multiple later) we handle blocking read here.
3084  *
3085  * We can also consistently treat trying to read from a disabled stream
3086  * as an IO error so implementations can assume the stream is enabled
3087  * while reading.
3088  *
3089  * Returns: The number of bytes copied or a negative error code on failure.
3090  */
3091 static ssize_t i915_perf_read(struct file *file,
3092 			      char __user *buf,
3093 			      size_t count,
3094 			      loff_t *ppos)
3095 {
3096 	struct i915_perf_stream *stream = file->private_data;
3097 	struct i915_perf *perf = stream->perf;
3098 	size_t offset = 0;
3099 	int ret;
3100 
3101 	/* To ensure it's handled consistently we simply treat all reads of a
3102 	 * disabled stream as an error. In particular it might otherwise lead
3103 	 * to a deadlock for blocking file descriptors...
3104 	 */
3105 	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3106 		return -EIO;
3107 
3108 	if (!(file->f_flags & O_NONBLOCK)) {
3109 		/* There's the small chance of false positives from
3110 		 * stream->ops->wait_unlocked.
3111 		 *
3112 		 * E.g. with single context filtering, since we only wait until
3113 		 * the OA buffer has >= 1 report, we don't immediately know
3114 		 * whether any reports really belong to the current context.
3115 		 */
3116 		do {
3117 			ret = stream->ops->wait_unlocked(stream);
3118 			if (ret)
3119 				return ret;
3120 
3121 			mutex_lock(&perf->lock);
3122 			ret = stream->ops->read(stream, buf, count, &offset);
3123 			mutex_unlock(&perf->lock);
3124 		} while (!offset && !ret);
3125 	} else {
3126 		mutex_lock(&perf->lock);
3127 		ret = stream->ops->read(stream, buf, count, &offset);
3128 		mutex_unlock(&perf->lock);
3129 	}
3130 
3131 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3132 	 * events where we might actually report EAGAIN on read() if there's
3133 	 * not really any data available. In this situation though we don't
3134 	 * want to enter a busy loop between poll() reporting a EPOLLIN event
3135 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3136 	 * effectively ensures we back off until the next hrtimer callback
3137 	 * before reporting another EPOLLIN event.
3138 	 * The exception to this is if ops->read() returned -ENOSPC which means
3139 	 * that more OA data is available than could fit in the user provided
3140 	 * buffer. In this case we want the next poll() call to not block.
3141 	 */
3142 	if (ret != -ENOSPC)
3143 		stream->pollin = false;
3144 
3145 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3146 	return offset ?: (ret ?: -EAGAIN);
3147 }
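
/*
 * In other words, a partial read wins: if stream->ops->read() copied
 * 128 bytes and then ran out of room, userspace sees 128 (and -ENOSPC
 * leaves stream->pollin untouched so the next poll() doesn't block);
 * only a read that copied nothing returns the error, or -EAGAIN when
 * there was no error at all.
 */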
3148 
3149 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3150 {
3151 	struct i915_perf_stream *stream =
3152 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3153 
3154 	if (oa_buffer_check_unlocked(stream)) {
3155 		stream->pollin = true;
3156 		wake_up(&stream->poll_wq);
3157 	}
3158 
3159 	hrtimer_forward_now(hrtimer,
3160 			    ns_to_ktime(stream->poll_oa_period));
3161 
3162 	return HRTIMER_RESTART;
3163 }
3164 
3165 /**
3166  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3167  * @stream: An i915 perf stream
3168  * @file: An i915 perf stream file
3169  * @wait: poll() state table
3170  *
3171  * For handling userspace polling on an i915 perf stream, this calls through to
3172  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3173  * will be woken for new stream data.
3174  *
3175  * Note: The &perf->lock mutex has been taken to serialize
3176  * with any non-file-operation driver hooks.
3177  *
3178  * Returns: any poll events that are ready without sleeping
3179  */
3180 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3181 				      struct file *file,
3182 				      poll_table *wait)
3183 {
3184 	__poll_t events = 0;
3185 
3186 	stream->ops->poll_wait(stream, file, wait);
3187 
3188 	/* Note: we don't explicitly check whether there's something to read
3189 	 * here since this path may be very hot depending on what else
3190 	 * userspace is polling, or on the timeout in use. We rely solely on
3191 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3192 	 * samples to read.
3193 	 */
3194 	if (stream->pollin)
3195 		events |= EPOLLIN;
3196 
3197 	return events;
3198 }
3199 
3200 /**
3201  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3202  * @file: An i915 perf stream file
3203  * @wait: poll() state table
3204  *
3205  * For handling userspace polling on an i915 perf stream, this ensures
3206  * poll_wait() gets called with a wait queue that will be woken for new stream
3207  * data.
3208  *
3209  * Note: Implementation deferred to i915_perf_poll_locked()
3210  *
3211  * Returns: any poll events that are ready without sleeping
3212  */
3213 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3214 {
3215 	struct i915_perf_stream *stream = file->private_data;
3216 	struct i915_perf *perf = stream->perf;
3217 	__poll_t ret;
3218 
3219 	mutex_lock(&perf->lock);
3220 	ret = i915_perf_poll_locked(stream, file, wait);
3221 	mutex_unlock(&perf->lock);
3222 
3223 	return ret;
3224 }
3225 
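/*
 * Illustrative userspace sketch (editor's example): pairing poll() with a
 * stream opened with I915_PERF_FLAG_FD_NONBLOCK, assuming <poll.h>. As
 * noted above, POLLIN only means the hrtimer saw data at its last check,
 * so a subsequent read() may still fail with EAGAIN and should simply be
 * retried after the next wakeup.
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		ssize_t len = read(stream_fd, buf, sizeof(buf));
 *		if (len < 0 && errno == EAGAIN)
 *			;	a false positive; just poll() again
 *	}
 */
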
3226 /**
3227  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3228  * @stream: A disabled i915 perf stream
3229  *
3230  * [Re]enables the associated capture of data for this stream.
3231  *
3232  * If a stream was previously enabled then there's currently no intention
3233  * to provide userspace any guarantee about the preservation of previously
3234  * buffered data.
3235  */
3236 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3237 {
3238 	if (stream->enabled)
3239 		return;
3240 
3241 	/* Allow stream->ops->enable() to refer to this */
3242 	stream->enabled = true;
3243 
3244 	if (stream->ops->enable)
3245 		stream->ops->enable(stream);
3246 
3247 	if (stream->hold_preemption)
3248 		intel_context_set_nopreempt(stream->pinned_ctx);
3249 }
3250 
3251 /**
3252  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3253  * @stream: An enabled i915 perf stream
3254  *
3255  * Disables the associated capture of data for this stream.
3256  *
3257  * The intention is that disabling and re-enabling a stream will ideally be
3258  * cheaper than destroying and re-opening a stream with the same configuration,
3259  * though there are no formal guarantees about what state or buffered data
3260  * must be retained between disabling and re-enabling a stream.
3261  *
3262  * Note: while a stream is disabled it's considered an error for userspace
3263  * to attempt to read from the stream (-EIO).
3264  */
3265 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3266 {
3267 	if (!stream->enabled)
3268 		return;
3269 
3270 	/* Allow stream->ops->disable() to refer to this */
3271 	stream->enabled = false;
3272 
3273 	if (stream->hold_preemption)
3274 		intel_context_clear_nopreempt(stream->pinned_ctx);
3275 
3276 	if (stream->ops->disable)
3277 		stream->ops->disable(stream);
3278 }
3279 
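/*
 * Illustrative userspace sketch (editor's example): a stream opened with
 * I915_PERF_FLAG_DISABLED only starts capturing once explicitly enabled,
 * and can be paused around the workload of interest without losing its
 * configuration.
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE);
 *	... run the workload and read() samples ...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE);
 */
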
3280 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3281 				    unsigned long metrics_set)
3282 {
3283 	struct i915_oa_config *config;
3284 	long ret = stream->oa_config->id;
3285 
3286 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3287 	if (!config)
3288 		return -EINVAL;
3289 
3290 	if (config != stream->oa_config) {
3291 		int err;
3292 
3293 		/*
3294 		 * If OA is bound to a specific context, emit the
3295 		 * reconfiguration inline from that context. The update
3296 		 * will then be ordered with respect to submission on that
3297 		 * context.
3298 		 *
3299 		 * When set globally, we use a low priority kernel context,
3300 		 * so it will effectively take effect when idle.
3301 		 */
3302 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3303 		if (!err)
3304 			config = xchg(&stream->oa_config, config);
3305 		else
3306 			ret = err;
3307 	}
3308 
3309 	i915_oa_config_put(config);
3310 
3311 	return ret;
3312 }
3313 
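/*
 * Illustrative userspace sketch (editor's example): switching an open
 * stream to another metrics set without reopening it (perf revision 2+).
 * The argument is a config ID as returned by DRM_IOCTL_I915_PERF_ADD_CONFIG
 * or enumerated under the sysfs metrics/ directory; new_metrics_id is a
 * placeholder. On success the ioctl returns the previous config's ID:
 *
 *	int prev = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, new_metrics_id);
 */
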
3314 /**
3315  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3316  * @stream: An i915 perf stream
3317  * @cmd: the ioctl request
3318  * @arg: the ioctl data
3319  *
3320  * Note: The &perf->lock mutex has been taken to serialize
3321  * with any non-file-operation driver hooks.
3322  *
3323  * Returns: zero on success or a negative error code. Returns -EINVAL for
3324  * an unknown ioctl request.
3325  */
3326 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3327 				   unsigned int cmd,
3328 				   unsigned long arg)
3329 {
3330 	switch (cmd) {
3331 	case I915_PERF_IOCTL_ENABLE:
3332 		i915_perf_enable_locked(stream);
3333 		return 0;
3334 	case I915_PERF_IOCTL_DISABLE:
3335 		i915_perf_disable_locked(stream);
3336 		return 0;
3337 	case I915_PERF_IOCTL_CONFIG:
3338 		return i915_perf_config_locked(stream, arg);
3339 	}
3340 
3341 	return -EINVAL;
3342 }
3343 
3344 /**
3345  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3346  * @file: An i915 perf stream file
3347  * @cmd: the ioctl request
3348  * @arg: the ioctl data
3349  *
3350  * Implementation deferred to i915_perf_ioctl_locked().
3351  *
3352  * Returns: zero on success or a negative error code. Returns -EINVAL for
3353  * an unknown ioctl request.
3354  */
3355 static long i915_perf_ioctl(struct file *file,
3356 			    unsigned int cmd,
3357 			    unsigned long arg)
3358 {
3359 	struct i915_perf_stream *stream = file->private_data;
3360 	struct i915_perf *perf = stream->perf;
3361 	long ret;
3362 
3363 	mutex_lock(&perf->lock);
3364 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3365 	mutex_unlock(&perf->lock);
3366 
3367 	return ret;
3368 }
3369 
3370 /**
3371  * i915_perf_destroy_locked - destroy an i915 perf stream
3372  * @stream: An i915 perf stream
3373  *
3374  * Frees all resources associated with the given i915 perf @stream, disabling
3375  * any associated data capture in the process.
3376  *
3377  * Note: The &perf->lock mutex has been taken to serialize
3378  * with any non-file-operation driver hooks.
3379  */
3380 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3381 {
3382 	if (stream->enabled)
3383 		i915_perf_disable_locked(stream);
3384 
3385 	if (stream->ops->destroy)
3386 		stream->ops->destroy(stream);
3387 
3388 	if (stream->ctx)
3389 		i915_gem_context_put(stream->ctx);
3390 
3391 	kfree(stream);
3392 }
3393 
3394 /**
3395  * i915_perf_release - handles userspace close() of a stream file
3396  * @inode: anonymous inode associated with file
3397  * @file: An i915 perf stream file
3398  *
3399  * Cleans up any resources associated with an open i915 perf stream file.
3400  *
3401  * NB: close() can't really fail from the userspace point of view.
3402  *
3403  * Returns: zero on success or a negative error code.
3404  */
3405 static int i915_perf_release(struct inode *inode, struct file *file)
3406 {
3407 	struct i915_perf_stream *stream = file->private_data;
3408 	struct i915_perf *perf = stream->perf;
3409 
3410 	mutex_lock(&perf->lock);
3411 	i915_perf_destroy_locked(stream);
3412 	mutex_unlock(&perf->lock);
3413 
3414 	/* Release the reference the perf stream kept on the driver. */
3415 	drm_dev_put(&perf->i915->drm);
3416 
3417 	return 0;
3418 }
3419 
3420 
3421 static const struct file_operations fops = {
3422 	.owner		= THIS_MODULE,
3423 	.llseek		= no_llseek,
3424 	.release	= i915_perf_release,
3425 	.poll		= i915_perf_poll,
3426 	.read		= i915_perf_read,
3427 	.unlocked_ioctl	= i915_perf_ioctl,
3428 	/* Our ioctls have no arguments, so it's safe to use the same function
3429 	 * to handle 32-bit compatibility.
3430 	 */
3431 	.compat_ioctl   = i915_perf_ioctl,
3432 };
3433 
3434 #endif /* notyet */
3435 
3436 /**
3437  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3438  * @perf: i915 perf instance
3439  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3440  * @props: individually validated u64 property value pairs
3441  * @file: drm file
3442  *
3443  * See i915_perf_ioctl_open() for interface details.
3444  *
3445  * Implements further stream config validation and stream initialization on
3446  * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3447  * taken to serialize with any non-file-operation driver hooks.
3448  *
3449  * Note: at this point the @props have only been validated in isolation and
3450  * it's still necessary to validate that the combination of properties makes
3451  * sense.
3452  *
3453  * In the case where userspace is interested in OA unit metrics then further
3454  * config validation and stream initialization details will be handled by
3455  * i915_oa_stream_init(). The code here should only validate config state that
3456  * will be relevant to all stream types / backends.
3457  *
3458  * Returns: zero on success or a negative error code.
3459  */
3460 static int
3461 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3462 			    struct drm_i915_perf_open_param *param,
3463 			    struct perf_open_properties *props,
3464 			    struct drm_file *file)
3465 {
3466 	STUB();
3467 	return -ENOSYS;
3468 #ifdef notyet
3469 	struct i915_gem_context *specific_ctx = NULL;
3470 	struct i915_perf_stream *stream = NULL;
3471 	unsigned long f_flags = 0;
3472 	bool privileged_op = true;
3473 	int stream_fd;
3474 	int ret;
3475 
3476 	if (props->single_context) {
3477 		u32 ctx_handle = props->ctx_handle;
3478 		struct drm_i915_file_private *file_priv = file->driver_priv;
3479 
3480 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3481 		if (IS_ERR(specific_ctx)) {
3482 			drm_dbg(&perf->i915->drm,
3483 				"Failed to look up context with ID %u for opening perf stream\n",
3484 				  ctx_handle);
3485 			ret = PTR_ERR(specific_ctx);
3486 			goto err;
3487 		}
3488 	}
3489 
3490 	/*
3491 	 * On Haswell the OA unit supports clock gating off for a specific
3492 	 * context and in this mode there's no visibility of metrics for the
3493 	 * rest of the system, which we consider acceptable for a
3494 	 * non-privileged client.
3495 	 *
3496 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3497 	 * specific context and the kernel can't securely stop the counters
3498 	 * from updating as system-wide / global values. Even though we can
3499 	 * filter reports based on the included context ID we can't block
3500 	 * clients from seeing the raw / global counter values via
3501 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3502 	 * enable the OA unit by default.
3503 	 *
3504 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3505 	 * per context basis. So we can relax requirements there if the user
3506 	 * doesn't request global stream access (i.e. query based sampling
3507 	 * using MI_REPORT_PERF_COUNT).
3508 	 */
3509 	if (IS_HASWELL(perf->i915) && specific_ctx)
3510 		privileged_op = false;
3511 	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3512 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3513 		privileged_op = false;
3514 
3515 	if (props->hold_preemption) {
3516 		if (!props->single_context) {
3517 			drm_dbg(&perf->i915->drm,
3518 				"preemption disable with no context\n");
3519 			ret = -EINVAL;
3520 			goto err;
3521 		}
3522 		privileged_op = true;
3523 	}
3524 
3525 	/*
3526 	 * Asking for SSEU configuration is a privileged operation.
3527 	 */
3528 	if (props->has_sseu)
3529 		privileged_op = true;
3530 	else
3531 		get_default_sseu_config(&props->sseu, props->engine);
3532 
3533 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3534 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3535 	 * to determine if it's ok to access system wide OA counters
3536 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3537 	 */
3538 	if (privileged_op &&
3539 	    i915_perf_stream_paranoid && !perfmon_capable()) {
3540 		drm_dbg(&perf->i915->drm,
3541 			"Insufficient privileges to open i915 perf stream\n");
3542 		ret = -EACCES;
3543 		goto err_ctx;
3544 	}
3545 
3546 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3547 	if (!stream) {
3548 		ret = -ENOMEM;
3549 		goto err_ctx;
3550 	}
3551 
3552 	stream->perf = perf;
3553 	stream->ctx = specific_ctx;
3554 	stream->poll_oa_period = props->poll_oa_period;
3555 
3556 	ret = i915_oa_stream_init(stream, param, props);
3557 	if (ret)
3558 		goto err_alloc;
3559 
3560 	/* We avoid simply assigning stream->sample_flags = props->sample_flags
3561 	 * so that _stream_init can check the combination of sample flags more
3562 	 * thoroughly, but this is still the expected result at this point.
3563 	 */
3564 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3565 		ret = -ENODEV;
3566 		goto err_flags;
3567 	}
3568 
3569 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3570 		f_flags |= O_CLOEXEC;
3571 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3572 		f_flags |= O_NONBLOCK;
3573 
3574 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3575 	if (stream_fd < 0) {
3576 		ret = stream_fd;
3577 		goto err_flags;
3578 	}
3579 
3580 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3581 		i915_perf_enable_locked(stream);
3582 
3583 	/* Take a reference on the driver that will be kept with stream_fd
3584 	 * until its release.
3585 	 */
3586 	drm_dev_get(&perf->i915->drm);
3587 
3588 	return stream_fd;
3589 
3590 err_flags:
3591 	if (stream->ops->destroy)
3592 		stream->ops->destroy(stream);
3593 err_alloc:
3594 	kfree(stream);
3595 err_ctx:
3596 	if (specific_ctx)
3597 		i915_gem_context_put(specific_ctx);
3598 err:
3599 	return ret;
3600 #endif
3601 }
3602 
3603 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3604 {
3605 	return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
3606 					     2ULL << exponent);
3607 }
3608 
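/*
 * Worked example (editor's note): the sampling period is (2 << exponent)
 * GT timestamp ticks. With a 12.5MHz timestamp clock (80ns per tick, as
 * on Haswell) exponent 0 gives 2 * 80ns = 160ns, matching the figure
 * quoted in read_properties_unlocked() below, and each increment of the
 * exponent doubles the period, so e.g. exponent 16 gives roughly 10.5ms.
 */
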
3609 static __always_inline bool
3610 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3611 {
3612 	return test_bit(format, perf->format_mask);
3613 }
3614 
3615 static __always_inline void
3616 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3617 {
3618 	__set_bit(format, perf->format_mask);
3619 }
3620 
3621 /**
3622  * read_properties_unlocked - validate + copy userspace stream open properties
3623  * @perf: i915 perf instance
3624  * @uprops: The array of u64 key value pairs given by userspace
3625  * @n_props: The number of key value pairs expected in @uprops
3626  * @props: The stream configuration built up while validating properties
3627  *
3628  * Note this function only validates properties in isolation; it doesn't
3629  * validate that the combination of properties makes sense or that all
3630  * properties necessary for a particular kind of stream have been set.
3631  *
3632  * Note that there currently aren't any ordering requirements for properties so
3633  * we shouldn't validate or assume anything about ordering here. This doesn't
3634  * rule out defining new properties with ordering requirements in the future.
3635  */
3636 static int read_properties_unlocked(struct i915_perf *perf,
3637 				    u64 __user *uprops,
3638 				    u32 n_props,
3639 				    struct perf_open_properties *props)
3640 {
3641 	u64 __user *uprop = uprops;
3642 	u32 i;
3643 	int ret;
3644 
3645 	memset(props, 0, sizeof(struct perf_open_properties));
3646 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3647 
3648 	if (!n_props) {
3649 		drm_dbg(&perf->i915->drm,
3650 			"No i915 perf properties given\n");
3651 		return -EINVAL;
3652 	}
3653 
3654 	/* At the moment we only support using i915-perf on the RCS. */
3655 	props->engine = intel_engine_lookup_user(perf->i915,
3656 						 I915_ENGINE_CLASS_RENDER,
3657 						 0);
3658 	if (!props->engine) {
3659 		drm_dbg(&perf->i915->drm,
3660 			"No RENDER-capable engines\n");
3661 		return -EINVAL;
3662 	}
3663 
3664 	/* Considering that ID = 0 is reserved and assuming that we don't
3665 	 * (currently) expect any configurations to ever specify duplicate
3666 	 * values for a particular property ID then the last _PROP_MAX value is
3667 	 * one greater than the maximum number of properties we expect to get
3668 	 * from userspace.
3669 	 */
3670 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3671 		drm_dbg(&perf->i915->drm,
3672 			"More i915 perf properties specified than exist\n");
3673 		return -EINVAL;
3674 	}
3675 
3676 	for (i = 0; i < n_props; i++) {
3677 		u64 oa_period, oa_freq_hz;
3678 		u64 id, value;
3679 
3680 		ret = get_user(id, uprop);
3681 		if (ret)
3682 			return ret;
3683 
3684 		ret = get_user(value, uprop + 1);
3685 		if (ret)
3686 			return ret;
3687 
3688 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3689 			drm_dbg(&perf->i915->drm,
3690 				"Unknown i915 perf property ID\n");
3691 			return -EINVAL;
3692 		}
3693 
3694 		switch ((enum drm_i915_perf_property_id)id) {
3695 		case DRM_I915_PERF_PROP_CTX_HANDLE:
3696 			props->single_context = 1;
3697 			props->ctx_handle = value;
3698 			break;
3699 		case DRM_I915_PERF_PROP_SAMPLE_OA:
3700 			if (value)
3701 				props->sample_flags |= SAMPLE_OA_REPORT;
3702 			break;
3703 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
3704 			if (value == 0) {
3705 				drm_dbg(&perf->i915->drm,
3706 					"Unknown OA metric set ID\n");
3707 				return -EINVAL;
3708 			}
3709 			props->metrics_set = value;
3710 			break;
3711 		case DRM_I915_PERF_PROP_OA_FORMAT:
3712 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3713 				drm_dbg(&perf->i915->drm,
3714 					"Out-of-range OA report format %llu\n",
3715 					  value);
3716 				return -EINVAL;
3717 			}
3718 			if (!oa_format_valid(perf, value)) {
3719 				drm_dbg(&perf->i915->drm,
3720 					"Unsupported OA report format %llu\n",
3721 					  value);
3722 				return -EINVAL;
3723 			}
3724 			props->oa_format = value;
3725 			break;
3726 		case DRM_I915_PERF_PROP_OA_EXPONENT:
3727 			if (value > OA_EXPONENT_MAX) {
3728 				drm_dbg(&perf->i915->drm,
3729 					"OA timer exponent too high (> %u)\n",
3730 					 OA_EXPONENT_MAX);
3731 				return -EINVAL;
3732 			}
3733 
3734 			/* Theoretically we can program the OA unit to sample
3735 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3736 			 * for BXT. We don't allow such high sampling
3737 	 * frequencies by default without CAP_PERFMON or CAP_SYS_ADMIN.
3738 			 */
3739 
3740 			BUILD_BUG_ON(sizeof(oa_period) != 8);
3741 			oa_period = oa_exponent_to_ns(perf, value);
3742 
3743 			/* This check is primarily to ensure that oa_period <=
3744 			 * UINT32_MAX (before passing to do_div which only
3745 			 * accepts a u32 denominator), but we can also skip
3746 			 * checking anything < 1Hz which implicitly can't be
3747 			 * limited via an integer oa_max_sample_rate.
3748 			 */
3749 			if (oa_period <= NSEC_PER_SEC) {
3750 				u64 tmp = NSEC_PER_SEC;
3751 				do_div(tmp, oa_period);
3752 				oa_freq_hz = tmp;
3753 			} else
3754 				oa_freq_hz = 0;
3755 
3756 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3757 				drm_dbg(&perf->i915->drm,
3758 					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3759 					  i915_oa_max_sample_rate);
3760 				return -EACCES;
3761 			}
3762 
3763 			props->oa_periodic = true;
3764 			props->oa_period_exponent = value;
3765 			break;
3766 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3767 			props->hold_preemption = !!value;
3768 			break;
3769 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3770 			struct drm_i915_gem_context_param_sseu user_sseu;
3771 
3772 			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
3773 				drm_dbg(&perf->i915->drm,
3774 					"SSEU config not supported on gfx %x\n",
3775 					GRAPHICS_VER_FULL(perf->i915));
3776 				return -ENODEV;
3777 			}
3778 
3779 			if (copy_from_user(&user_sseu,
3780 					   u64_to_user_ptr(value),
3781 					   sizeof(user_sseu))) {
3782 				drm_dbg(&perf->i915->drm,
3783 					"Unable to copy global sseu parameter\n");
3784 				return -EFAULT;
3785 			}
3786 
3787 			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3788 			if (ret) {
3789 				drm_dbg(&perf->i915->drm,
3790 					"Invalid SSEU configuration\n");
3791 				return ret;
3792 			}
3793 			props->has_sseu = true;
3794 			break;
3795 		}
3796 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3797 			if (value < 100000 /* 100us */) {
3798 				drm_dbg(&perf->i915->drm,
3799 					"OA availability timer too small (%lluns < 100us)\n",
3800 					  value);
3801 				return -EINVAL;
3802 			}
3803 			props->poll_oa_period = value;
3804 			break;
3805 		case DRM_I915_PERF_PROP_MAX:
3806 			MISSING_CASE(id);
3807 			return -EINVAL;
3808 		}
3809 
3810 		uprop += 2;
3811 	}
3812 
3813 	return 0;
3814 }
3815 
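/*
 * Illustrative userspace sketch (editor's example) of the flat
 * (key, value) layout this function parses; the metrics_set_id variable
 * and the exponent value below are placeholders:
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA,		1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET,	metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,		I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT,		16,
 *	};
 */
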
3816 /**
3817  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3818  * @dev: drm device
3819  * @data: ioctl data copied from userspace (unvalidated)
3820  * @file: drm file
3821  *
3822  * Validates the stream open parameters given by userspace including flags
3823  * and an array of u64 key, value pair properties.
3824  *
3825  * Very little is assumed up front about the nature of the stream being
3826  * opened (for instance we don't assume it's for periodic OA unit metrics). An
3827  * i915-perf stream is expected to be a suitable interface for other forms of
3828  * buffered data written by the GPU besides periodic OA metrics.
3829  *
3830  * Note we copy the properties from userspace outside of the i915 perf
3831  * mutex to avoid an awkward lockdep with mmap_lock.
3832  *
3833  * Most of the implementation details are handled by
3834  * i915_perf_open_ioctl_locked() after taking the &perf->lock
3835  * mutex for serializing with any non-file-operation driver hooks.
3836  *
3837  * Return: A newly opened i915 Perf stream file descriptor or negative
3838  * error code on failure.
3839  */
3840 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3841 			 struct drm_file *file)
3842 {
3843 	struct i915_perf *perf = &to_i915(dev)->perf;
3844 	struct drm_i915_perf_open_param *param = data;
3845 	struct perf_open_properties props;
3846 	u32 known_open_flags;
3847 	int ret;
3848 
3849 	if (!perf->i915) {
3850 		/* perf->i915 is NULL here, so drm_dbg() would deref NULL */
3851 		DRM_DEBUG("i915 perf interface not available for this system\n");
3852 		return -ENOTSUPP;
3853 	}
3854 
3855 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3856 			   I915_PERF_FLAG_FD_NONBLOCK |
3857 			   I915_PERF_FLAG_DISABLED;
3858 	if (param->flags & ~known_open_flags) {
3859 		drm_dbg(&perf->i915->drm,
3860 			"Unknown drm_i915_perf_open_param flag\n");
3861 		return -EINVAL;
3862 	}
3863 
3864 	ret = read_properties_unlocked(perf,
3865 				       u64_to_user_ptr(param->properties_ptr),
3866 				       param->num_properties,
3867 				       &props);
3868 	if (ret)
3869 		return ret;
3870 
3871 	mutex_lock(&perf->lock);
3872 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3873 	mutex_unlock(&perf->lock);
3874 
3875 	return ret;
3876 }
3877 
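/*
 * Illustrative userspace sketch (editor's example): opening a stream with
 * a property array like the one sketched after read_properties_unlocked()
 * above, on a DRM fd with sufficient privileges (or with the
 * dev.i915.perf_stream_paranoid sysctl set to 0):
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */
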
3878 /**
3879  * i915_perf_register - exposes i915-perf to userspace
3880  * @i915: i915 device instance
3881  *
3882  * In particular OA metric sets are advertised under a sysfs metrics/
3883  * directory allowing userspace to enumerate valid IDs that can be
3884  * used to open an i915-perf stream.
3885  */
3886 void i915_perf_register(struct drm_i915_private *i915)
3887 {
3888 #ifdef __linux__
3889 	struct i915_perf *perf = &i915->perf;
3890 
3891 	if (!perf->i915)
3892 		return;
3893 
3894 	/* To be sure we're synchronized with any attempted
3895 	 * i915_perf_open_ioctl(), considering that we register after
3896 	 * the driver is exposed to userspace.
3897 	 */
3898 	mutex_lock(&perf->lock);
3899 
3900 	perf->metrics_kobj =
3901 		kobject_create_and_add("metrics",
3902 				       &i915->drm.primary->kdev->kobj);
3903 
3904 	mutex_unlock(&perf->lock);
3905 #endif
3906 }
3907 
3908 /**
3909  * i915_perf_unregister - hide i915-perf from userspace
3910  * @i915: i915 device instance
3911  *
3912  * i915-perf state cleanup is split up into an 'unregister' and
3913  * 'deinit' phase where the interface is first hidden from
3914  * userspace by i915_perf_unregister() before cleaning up
3915  * remaining state in i915_perf_fini().
3916  */
3917 void i915_perf_unregister(struct drm_i915_private *i915)
3918 {
3919 	struct i915_perf *perf = &i915->perf;
3920 
3921 	if (!perf->metrics_kobj)
3922 		return;
3923 
3924 	kobject_put(perf->metrics_kobj);
3925 	perf->metrics_kobj = NULL;
3926 }
3927 
3928 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3929 {
3930 	static const i915_reg_t flex_eu_regs[] = {
3931 		EU_PERF_CNTL0,
3932 		EU_PERF_CNTL1,
3933 		EU_PERF_CNTL2,
3934 		EU_PERF_CNTL3,
3935 		EU_PERF_CNTL4,
3936 		EU_PERF_CNTL5,
3937 		EU_PERF_CNTL6,
3938 	};
3939 	int i;
3940 
3941 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3942 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3943 			return true;
3944 	}
3945 	return false;
3946 }
3947 
3948 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
3949 {
3950 	while (table->start || table->end) {
3951 		if (addr >= table->start && addr <= table->end)
3952 			return true;
3953 
3954 		table++;
3955 	}
3956 
3957 	return false;
3958 }
3959 
3960 #define REG_EQUAL(addr, mmio) \
3961 	((addr) == i915_mmio_reg_offset(mmio))
3962 
3963 static const struct i915_range gen7_oa_b_counters[] = {
3964 	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
3965 	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
3966 	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
3967 	{}
3968 };
3969 
3970 static const struct i915_range gen12_oa_b_counters[] = {
3971 	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
3972 	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
3973 	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG[1-8] */
3974 	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
3975 	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
3976 	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
3977 	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
3978 	{}
3979 };
3980 
3981 static const struct i915_range gen7_oa_mux_regs[] = {
3982 	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
3983 	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
3984 	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
3985 	{}
3986 };
3987 
3988 static const struct i915_range hsw_oa_mux_regs[] = {
3989 	{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
3990 	{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
3991 	{ .start = 0x25100, .end = 0x2ff90 },
3992 	{}
3993 };
3994 
3995 static const struct i915_range chv_oa_mux_regs[] = {
3996 	{ .start = 0x182300, .end = 0x1823a4 },
3997 	{}
3998 };
3999 
4000 static const struct i915_range gen8_oa_mux_regs[] = {
4001 	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
4002 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4003 	{}
4004 };
4005 
4006 static const struct i915_range gen11_oa_mux_regs[] = {
4007 	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
4008 	{}
4009 };
4010 
4011 static const struct i915_range gen12_oa_mux_regs[] = {
4012 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
4013 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
4014 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4015 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4016 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4017 	{}
4018 };
4019 
4020 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4021 {
4022 	return reg_in_range_table(addr, gen7_oa_b_counters);
4023 }
4024 
4025 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4026 {
4027 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4028 		reg_in_range_table(addr, gen8_oa_mux_regs);
4029 }
4030 
4031 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4032 {
4033 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4034 		reg_in_range_table(addr, gen8_oa_mux_regs) ||
4035 		reg_in_range_table(addr, gen11_oa_mux_regs);
4036 }
4037 
4038 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4039 {
4040 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4041 		reg_in_range_table(addr, hsw_oa_mux_regs);
4042 }
4043 
4044 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4045 {
4046 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4047 		reg_in_range_table(addr, chv_oa_mux_regs);
4048 }
4049 
4050 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4051 {
4052 	return reg_in_range_table(addr, gen12_oa_b_counters);
4053 }
4054 
4055 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4056 {
4057 	return reg_in_range_table(addr, gen12_oa_mux_regs);
4058 }
4059 
4060 #ifdef notyet
4061 
4062 static u32 mask_reg_value(u32 reg, u32 val)
4063 {
4064 	/* HALF_SLICE_CHICKEN2 is programmed with the
4065 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4066 	 * programmed by userspace doesn't change this.
4067 	 */
4068 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4069 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4070 
4071 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4072 	 * indicated by its name and a bunch of selection fields used by OA
4073 	 * configs.
4074 	 */
4075 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4076 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4077 
4078 	return val;
4079 }
4080 
4081 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4082 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4083 					 u32 __user *regs,
4084 					 u32 n_regs)
4085 {
4086 	struct i915_oa_reg *oa_regs;
4087 	int err;
4088 	u32 i;
4089 
4090 	if (!n_regs)
4091 		return NULL;
4092 
4093 	/* No is_valid function means we're not allowing any register to be programmed. */
4094 	GEM_BUG_ON(!is_valid);
4095 	if (!is_valid)
4096 		return ERR_PTR(-EINVAL);
4097 
4098 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4099 	if (!oa_regs)
4100 		return ERR_PTR(-ENOMEM);
4101 
4102 	for (i = 0; i < n_regs; i++) {
4103 		u32 addr, value;
4104 
4105 		err = get_user(addr, regs);
4106 		if (err)
4107 			goto addr_err;
4108 
4109 		if (!is_valid(perf, addr)) {
4110 			drm_dbg(&perf->i915->drm,
4111 				"Invalid oa_reg address: %X\n", addr);
4112 			err = -EINVAL;
4113 			goto addr_err;
4114 		}
4115 
4116 		err = get_user(value, regs + 1);
4117 		if (err)
4118 			goto addr_err;
4119 
4120 		oa_regs[i].addr = _MMIO(addr);
4121 		oa_regs[i].value = mask_reg_value(addr, value);
4122 
4123 		regs += 2;
4124 	}
4125 
4126 	return oa_regs;
4127 
4128 addr_err:
4129 	kfree(oa_regs);
4130 	return ERR_PTR(err);
4131 }
4132 
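/*
 * Illustrative sketch (editor's example) of the userspace layout parsed
 * here: a flat array of (mmio address, value) u32 pairs. The address and
 * values below are placeholders and must pass the platform's is_valid
 * callback:
 *
 *	uint32_t mux_regs[] = {
 *		0x9888, 0x14150001,
 *		0x9888, 0x00000000,
 *	};
 */
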
4133 static ssize_t show_dynamic_id(struct kobject *kobj,
4134 			       struct kobj_attribute *attr,
4135 			       char *buf)
4136 {
4137 	struct i915_oa_config *oa_config =
4138 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4139 
4140 	return sprintf(buf, "%d\n", oa_config->id);
4141 }
4142 
4143 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4144 					 struct i915_oa_config *oa_config)
4145 {
4146 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4147 	oa_config->sysfs_metric_id.attr.name = "id";
4148 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4149 	oa_config->sysfs_metric_id.show = show_dynamic_id;
4150 	oa_config->sysfs_metric_id.store = NULL;
4151 
4152 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4153 	oa_config->attrs[1] = NULL;
4154 
4155 	oa_config->sysfs_metric.name = oa_config->uuid;
4156 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4157 
4158 	return sysfs_create_group(perf->metrics_kobj,
4159 				  &oa_config->sysfs_metric);
4160 }
4161 
4162 #endif
4163 
4164 /**
4165  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4166  * @dev: drm device
4167  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4168  *        userspace (unvalidated)
4169  * @file: drm file
4170  *
4171  * Validates the submitted OA registers to be saved into a new OA config that
4172  * can then be used for programming the OA unit and its NOA network.
4173  *
4174  * Returns: A newly allocated config number to be used with the perf open ioctl
4175  * or a negative error code on failure.
4176  */
4177 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4178 			       struct drm_file *file)
4179 {
4180 	STUB();
4181 	return -ENOSYS;
4182 #ifdef notyet
4183 	struct i915_perf *perf = &to_i915(dev)->perf;
4184 	struct drm_i915_perf_oa_config *args = data;
4185 	struct i915_oa_config *oa_config, *tmp;
4186 	struct i915_oa_reg *regs;
4187 	int err, id;
4188 
4189 	if (!perf->i915) {
4190 		/* perf->i915 is NULL here, so drm_dbg() would deref NULL */
4191 		DRM_DEBUG("i915 perf interface not available for this system\n");
4192 		return -ENOTSUPP;
4193 	}
4194 
4195 	if (!perf->metrics_kobj) {
4196 		drm_dbg(&perf->i915->drm,
4197 			"OA metrics weren't advertised via sysfs\n");
4198 		return -EINVAL;
4199 	}
4200 
4201 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4202 		drm_dbg(&perf->i915->drm,
4203 			"Insufficient privileges to add i915 OA config\n");
4204 		return -EACCES;
4205 	}
4206 
4207 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4208 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4209 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4210 		drm_dbg(&perf->i915->drm,
4211 			"No OA registers given\n");
4212 		return -EINVAL;
4213 	}
4214 
4215 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4216 	if (!oa_config) {
4217 		drm_dbg(&perf->i915->drm,
4218 			"Failed to allocate memory for the OA config\n");
4219 		return -ENOMEM;
4220 	}
4221 
4222 	oa_config->perf = perf;
4223 	kref_init(&oa_config->ref);
4224 
4225 	if (!uuid_is_valid(args->uuid)) {
4226 		drm_dbg(&perf->i915->drm,
4227 			"Invalid uuid format for OA config\n");
4228 		err = -EINVAL;
4229 		goto reg_err;
4230 	}
4231 
4232 	/* Last character in oa_config->uuid will be 0 because oa_config was
4233 	 * allocated with kzalloc().
4234 	 */
4235 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4236 
4237 	oa_config->mux_regs_len = args->n_mux_regs;
4238 	regs = alloc_oa_regs(perf,
4239 			     perf->ops.is_valid_mux_reg,
4240 			     u64_to_user_ptr(args->mux_regs_ptr),
4241 			     args->n_mux_regs);
4242 
4243 	if (IS_ERR(regs)) {
4244 		drm_dbg(&perf->i915->drm,
4245 			"Failed to create OA config for mux_regs\n");
4246 		err = PTR_ERR(regs);
4247 		goto reg_err;
4248 	}
4249 	oa_config->mux_regs = regs;
4250 
4251 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4252 	regs = alloc_oa_regs(perf,
4253 			     perf->ops.is_valid_b_counter_reg,
4254 			     u64_to_user_ptr(args->boolean_regs_ptr),
4255 			     args->n_boolean_regs);
4256 
4257 	if (IS_ERR(regs)) {
4258 		drm_dbg(&perf->i915->drm,
4259 			"Failed to create OA config for b_counter_regs\n");
4260 		err = PTR_ERR(regs);
4261 		goto reg_err;
4262 	}
4263 	oa_config->b_counter_regs = regs;
4264 
4265 	if (GRAPHICS_VER(perf->i915) < 8) {
4266 		if (args->n_flex_regs != 0) {
4267 			err = -EINVAL;
4268 			goto reg_err;
4269 		}
4270 	} else {
4271 		oa_config->flex_regs_len = args->n_flex_regs;
4272 		regs = alloc_oa_regs(perf,
4273 				     perf->ops.is_valid_flex_reg,
4274 				     u64_to_user_ptr(args->flex_regs_ptr),
4275 				     args->n_flex_regs);
4276 
4277 		if (IS_ERR(regs)) {
4278 			drm_dbg(&perf->i915->drm,
4279 				"Failed to create OA config for flex_regs\n");
4280 			err = PTR_ERR(regs);
4281 			goto reg_err;
4282 		}
4283 		oa_config->flex_regs = regs;
4284 	}
4285 
4286 	err = mutex_lock_interruptible(&perf->metrics_lock);
4287 	if (err)
4288 		goto reg_err;
4289 
4290 	/* We shouldn't have too many configs, so this iteration shouldn't be
4291 	 * too costly.
4292 	 */
4293 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4294 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4295 			drm_dbg(&perf->i915->drm,
4296 				"OA config already exists with this uuid\n");
4297 			err = -EADDRINUSE;
4298 			goto sysfs_err;
4299 		}
4300 	}
4301 
4302 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4303 	if (err) {
4304 		drm_dbg(&perf->i915->drm,
4305 			"Failed to create sysfs entry for OA config\n");
4306 		goto sysfs_err;
4307 	}
4308 
4309 	/* Config id 0 is invalid, id 1 is reserved for the kernel's stored test config. */
4310 	oa_config->id = idr_alloc(&perf->metrics_idr,
4311 				  oa_config, 2,
4312 				  0, GFP_KERNEL);
4313 	if (oa_config->id < 0) {
4314 		drm_dbg(&perf->i915->drm,
4315 			"Failed to allocate an id for OA config\n");
4316 		err = oa_config->id;
4317 		goto sysfs_err;
4318 	}
4319 	id = oa_config->id;
4320 
4321 	drm_dbg(&perf->i915->drm,
4322 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4323 	mutex_unlock(&perf->metrics_lock);
4324 
4325 	return id;
4326 
4327 sysfs_err:
4328 	mutex_unlock(&perf->metrics_lock);
4329 reg_err:
4330 	i915_oa_config_put(oa_config);
4331 	drm_dbg(&perf->i915->drm,
4332 		"Failed to add new OA config\n");
4333 	return err;
4334 #endif
4335 }
4336 
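/*
 * Illustrative userspace sketch (editor's example): registering an array
 * of mux (addr, value) pairs like the one sketched after alloc_oa_regs()
 * above as a new OA config. The UUID is a placeholder; on success the
 * ioctl returns a config ID usable with DRM_I915_PERF_PROP_OA_METRICS_SET:
 *
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = sizeof(mux_regs) / (2 * sizeof(uint32_t)),
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */
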
4337 /**
4338  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4339  * @dev: drm device
4340  * @data: ioctl data (pointer to u64 integer) copied from userspace
4341  * @file: drm file
4342  *
4343  * Configs can be removed while in use; they will stop appearing in sysfs
4344  * and their content will be freed when the stream using the config is closed.
4345  *
4346  * Returns: 0 on success or a negative error code on failure.
4347  */
4348 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4349 				  struct drm_file *file)
4350 {
4351 	struct i915_perf *perf = &to_i915(dev)->perf;
4352 	u64 *arg = data;
4353 	struct i915_oa_config *oa_config;
4354 	int ret;
4355 
4356 	if (!perf->i915) {
4357 		/* perf->i915 is NULL here, so drm_dbg() would deref NULL */
4358 		DRM_DEBUG("i915 perf interface not available for this system\n");
4359 		return -ENOTSUPP;
4360 	}
4361 
4362 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4363 		drm_dbg(&perf->i915->drm,
4364 			"Insufficient privileges to remove i915 OA config\n");
4365 		return -EACCES;
4366 	}
4367 
4368 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4369 	if (ret)
4370 		return ret;
4371 
4372 	oa_config = idr_find(&perf->metrics_idr, *arg);
4373 	if (!oa_config) {
4374 		drm_dbg(&perf->i915->drm,
4375 			"Failed to remove unknown OA config\n");
4376 		ret = -ENOENT;
4377 		goto err_unlock;
4378 	}
4379 
4380 	GEM_BUG_ON(*arg != oa_config->id);
4381 
4382 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4383 
4384 	idr_remove(&perf->metrics_idr, *arg);
4385 
4386 	mutex_unlock(&perf->metrics_lock);
4387 
4388 	drm_dbg(&perf->i915->drm,
4389 		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4390 
4391 	i915_oa_config_put(oa_config);
4392 
4393 	return 0;
4394 
4395 err_unlock:
4396 	mutex_unlock(&perf->metrics_lock);
4397 	return ret;
4398 }
4399 
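/*
 * Illustrative userspace sketch (editor's example): removing a config by
 * ID; the ioctl takes a pointer to the u64 ID rather than the value
 * itself:
 *
 *	uint64_t id = config_id;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */
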
4400 #ifdef notyet
4401 static struct ctl_table oa_table[] = {
4402 	{
4403 	 .procname = "perf_stream_paranoid",
4404 	 .data = &i915_perf_stream_paranoid,
4405 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4406 	 .mode = 0644,
4407 	 .proc_handler = proc_dointvec_minmax,
4408 	 .extra1 = SYSCTL_ZERO,
4409 	 .extra2 = SYSCTL_ONE,
4410 	 },
4411 	{
4412 	 .procname = "oa_max_sample_rate",
4413 	 .data = &i915_oa_max_sample_rate,
4414 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4415 	 .mode = 0644,
4416 	 .proc_handler = proc_dointvec_minmax,
4417 	 .extra1 = SYSCTL_ZERO,
4418 	 .extra2 = &oa_sample_rate_hard_limit,
4419 	 },
4420 	{}
4421 };
4422 #endif
4423 
4424 static void oa_init_supported_formats(struct i915_perf *perf)
4425 {
4426 	struct drm_i915_private *i915 = perf->i915;
4427 	enum intel_platform platform = INTEL_INFO(i915)->platform;
4428 
4429 	switch (platform) {
4430 	case INTEL_HASWELL:
4431 		oa_format_add(perf, I915_OA_FORMAT_A13);
4433 		oa_format_add(perf, I915_OA_FORMAT_A29);
4434 		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4435 		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4436 		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4437 		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4438 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4439 		break;
4440 
4441 	case INTEL_BROADWELL:
4442 	case INTEL_CHERRYVIEW:
4443 	case INTEL_SKYLAKE:
4444 	case INTEL_BROXTON:
4445 	case INTEL_KABYLAKE:
4446 	case INTEL_GEMINILAKE:
4447 	case INTEL_COFFEELAKE:
4448 	case INTEL_COMETLAKE:
4449 	case INTEL_ICELAKE:
4450 	case INTEL_ELKHARTLAKE:
4451 	case INTEL_JASPERLAKE:
4452 	case INTEL_TIGERLAKE:
4453 	case INTEL_ROCKETLAKE:
4454 	case INTEL_DG1:
4455 	case INTEL_ALDERLAKE_S:
4456 	case INTEL_ALDERLAKE_P:
4457 		oa_format_add(perf, I915_OA_FORMAT_A12);
4458 		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4459 		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4460 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4461 		break;
4462 
4463 	default:
4464 		MISSING_CASE(platform);
4465 	}
4466 }
4467 
4468 /**
4469  * i915_perf_init - initialize i915-perf state on module bind
4470  * @i915: i915 device instance
4471  *
4472  * Initializes i915-perf state without exposing anything to userspace.
4473  *
4474  * Note: i915-perf initialization is split into an 'init' and 'register'
4475  * phase with the i915_perf_register() exposing state to userspace.
4476  */
4477 void i915_perf_init(struct drm_i915_private *i915)
4478 {
4479 	struct i915_perf *perf = &i915->perf;
4480 
4481 	/* XXX const struct i915_perf_ops! */
4482 
4483 	/* i915_perf is not enabled for DG2 yet */
4484 	if (IS_DG2(i915))
4485 		return;
4486 
4487 	perf->oa_formats = oa_formats;
4488 	if (IS_HASWELL(i915)) {
4489 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4490 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4491 		perf->ops.is_valid_flex_reg = NULL;
4492 		perf->ops.enable_metric_set = hsw_enable_metric_set;
4493 		perf->ops.disable_metric_set = hsw_disable_metric_set;
4494 		perf->ops.oa_enable = gen7_oa_enable;
4495 		perf->ops.oa_disable = gen7_oa_disable;
4496 		perf->ops.read = gen7_oa_read;
4497 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4498 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4499 		/* Note that although we could theoretically also support the
4500 		 * legacy ringbuffer mode on BDW (and earlier iterations of
4501 		 * this driver, before upstreaming did this) it didn't seem
4502 		 * worth the complexity to maintain now that BDW+ enable
4503 		 * execlist mode by default.
4504 		 */
4505 		perf->ops.read = gen8_oa_read;
4506 
4507 		if (IS_GRAPHICS_VER(i915, 8, 9)) {
4508 			perf->ops.is_valid_b_counter_reg =
4509 				gen7_is_valid_b_counter_addr;
4510 			perf->ops.is_valid_mux_reg =
4511 				gen8_is_valid_mux_addr;
4512 			perf->ops.is_valid_flex_reg =
4513 				gen8_is_valid_flex_addr;
4514 
4515 			if (IS_CHERRYVIEW(i915)) {
4516 				perf->ops.is_valid_mux_reg =
4517 					chv_is_valid_mux_addr;
4518 			}
4519 
4520 			perf->ops.oa_enable = gen8_oa_enable;
4521 			perf->ops.oa_disable = gen8_oa_disable;
4522 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4523 			perf->ops.disable_metric_set = gen8_disable_metric_set;
4524 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4525 
4526 			if (GRAPHICS_VER(i915) == 8) {
4527 				perf->ctx_oactxctrl_offset = 0x120;
4528 				perf->ctx_flexeu0_offset = 0x2ce;
4529 
4530 				perf->gen8_valid_ctx_bit = BIT(25);
4531 			} else {
4532 				perf->ctx_oactxctrl_offset = 0x128;
4533 				perf->ctx_flexeu0_offset = 0x3de;
4534 
4535 				perf->gen8_valid_ctx_bit = BIT(16);
4536 			}
4537 		} else if (GRAPHICS_VER(i915) == 11) {
4538 			perf->ops.is_valid_b_counter_reg =
4539 				gen7_is_valid_b_counter_addr;
4540 			perf->ops.is_valid_mux_reg =
4541 				gen11_is_valid_mux_addr;
4542 			perf->ops.is_valid_flex_reg =
4543 				gen8_is_valid_flex_addr;
4544 
4545 			perf->ops.oa_enable = gen8_oa_enable;
4546 			perf->ops.oa_disable = gen8_oa_disable;
4547 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4548 			perf->ops.disable_metric_set = gen11_disable_metric_set;
4549 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4550 
4551 			perf->ctx_oactxctrl_offset = 0x124;
4552 			perf->ctx_flexeu0_offset = 0x78e;
4553 
4554 			perf->gen8_valid_ctx_bit = BIT(16);
4555 		} else if (GRAPHICS_VER(i915) == 12) {
4556 			perf->ops.is_valid_b_counter_reg =
4557 				gen12_is_valid_b_counter_addr;
4558 			perf->ops.is_valid_mux_reg =
4559 				gen12_is_valid_mux_addr;
4560 			perf->ops.is_valid_flex_reg =
4561 				gen8_is_valid_flex_addr;
4562 
4563 			perf->ops.oa_enable = gen12_oa_enable;
4564 			perf->ops.oa_disable = gen12_oa_disable;
4565 			perf->ops.enable_metric_set = gen12_enable_metric_set;
4566 			perf->ops.disable_metric_set = gen12_disable_metric_set;
4567 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4568 
4569 			perf->ctx_flexeu0_offset = 0;
4570 			perf->ctx_oactxctrl_offset = 0x144;
4571 		}
4572 	}
4573 
4574 	if (perf->ops.enable_metric_set) {
4575 		rw_init(&perf->lock, "perflk");
4576 
4577 		/* Choose a representative limit */
4578 		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
4579 
4580 		rw_init(&perf->metrics_lock, "metricslk");
4581 		idr_init_base(&perf->metrics_idr, 1);
4582 
4583 		/* We set up some ratelimit state to potentially throttle any
4584 		 * _NOTES about spurious, invalid OA reports which we don't
4585 		 * forward to userspace.
4586 		 *
4587 		 * We print a _NOTE about any throttling when closing the
4588 		 * stream instead of waiting until driver _fini which no one
4589 		 * would ever see.
4590 		 *
4591 		 * Using the same limiting factors as printk_ratelimit()
4592 		 */
4593 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4594 		/* Since we use a DRM_NOTE for spurious reports it would be
4595 		 * inconsistent to let __ratelimit() automatically print a
4596 		 * warning for throttling.
4597 		 */
4598 		ratelimit_set_flags(&perf->spurious_report_rs,
4599 				    RATELIMIT_MSG_ON_RELEASE);
4600 
4601 		ratelimit_state_init(&perf->tail_pointer_race,
4602 				     5 * HZ, 10);
4603 		ratelimit_set_flags(&perf->tail_pointer_race,
4604 				    RATELIMIT_MSG_ON_RELEASE);
4605 
4606 		atomic64_set(&perf->noa_programming_delay,
4607 			     500 * 1000 /* 500us */);
4608 
4609 		perf->i915 = i915;
4610 
4611 		oa_init_supported_formats(perf);
4612 	}
4613 }
4614 
4615 static int destroy_config(int id, void *p, void *data)
4616 {
4617 	i915_oa_config_put(p);
4618 	return 0;
4619 }
4620 
4621 int i915_perf_sysctl_register(void)
4622 {
4623 #ifdef notyet
4624 	sysctl_header = register_sysctl("dev/i915", oa_table);
4625 #endif
4626 	return 0;
4627 }
4628 
4629 void i915_perf_sysctl_unregister(void)
4630 {
4631 #ifdef notyet
4632 	unregister_sysctl_table(sysctl_header);
4633 #endif
4634 }
4635 
4636 /**
4637  * i915_perf_fini - Counter part to i915_perf_init()
4638  * @i915: i915 device instance
4639  */
4640 void i915_perf_fini(struct drm_i915_private *i915)
4641 {
4642 	struct i915_perf *perf = &i915->perf;
4643 
4644 	if (!perf->i915)
4645 		return;
4646 
4647 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4648 	idr_destroy(&perf->metrics_idr);
4649 
4650 	memset(&perf->ops, 0, sizeof(perf->ops));
4651 	perf->i915 = NULL;
4652 }
4653 
4654 /**
4655  * i915_perf_ioctl_version - Version of the i915-perf subsystem
4656  *
4657  * This version number is used by userspace to detect available features.
4658  */
4659 int i915_perf_ioctl_version(void)
4660 {
4661 	/*
4662 	 * 1: Initial version
4663 	 *   I915_PERF_IOCTL_ENABLE
4664 	 *   I915_PERF_IOCTL_DISABLE
4665 	 *
4666 	 * 2: Added runtime modification of OA config.
4667 	 *   I915_PERF_IOCTL_CONFIG
4668 	 *
4669 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4670 	 *    preemption on a particular context so that performance data is
4671 	 *    accessible from a delta of MI_RPC reports without looking at the
4672 	 *    OA buffer.
4673 	 *
4674 	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4675 	 *    be run for the duration of the performance recording based on
4676 	 *    their SSEU configuration.
4677 	 *
4678 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4679 	 *    interval for the hrtimer used to check for OA data.
4680 	 */
4681 	return 5;
4682 }
4683 
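/*
 * Illustrative userspace sketch (editor's example): on Linux this revision
 * is exposed through the I915_PARAM_PERF_REVISION getparam, so feature
 * detection looks like the following; a value >= 2, for instance, means
 * I915_PERF_IOCTL_CONFIG is available:
 *
 *	int rev = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &rev,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */
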
4684 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4685 #include "selftests/i915_perf.c"
4686 #endif
4687