/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
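 *
 * As an illustration, a minimal userspace sketch of opening a periodic OA
 * stream this way (the metrics set ID stands for an ID advertised via sysfs,
 * and the exponent value is just an example choice an application might
 * make) could look like::
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);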
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 * - Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.
 *   Events opened with a pid will be automatically enabled/disabled according
 *   to the scheduling of that process - so not appropriate for us. When an
 *   event is related to a cpu id, perf ensures pmu methods will be invoked
 *   via an inter processor interrupt on that core. To avoid invasive changes
 *   our userspace opened OA perf events for a specific cpu. This was workable
 *   but it meant the majority of the OA driver ran in atomic context,
 *   including all OA report forwarding, which wasn't really necessary in our
 *   case and seemed to make our locking requirements somewhat complex as we
 *   handled the interaction with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"
#include "oa/i915_oa_hsw.h"
#include "oa/i915_oa_bdw.h"
#include "oa/i915_oa_chv.h"
#include "oa/i915_oa_sklgt2.h"
#include "oa/i915_oa_sklgt3.h"
#include "oa/i915_oa_sklgt4.h"
#include "oa/i915_oa_bxt.h"
#include "oa/i915_oa_kblgt2.h"
#include "oa/i915_oa_kblgt3.h"
#include "oa/i915_oa_glk.h"
#include "oa/i915_oa_cflgt2.h"
#include "oa/i915_oa_cflgt3.h"
#include "oa/i915_oa_cnl.h"
#include "oa/i915_oa_icl.h"
#include "oa/i915_oa_tgl.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
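 *
 * For example, with the hrtimer running at 200Hz and the 100000ns margin
 * used below (an illustrative sequence, not a strict schedule)::
 *
 *	t=0ms  callback reads a new hw_tail -> it starts aging in
 *	       tails[!aged_idx] and the current timestamp is noted
 *	t=5ms  callback sees the margin has passed -> the pointer becomes
 *	       the aged tail, available to read(), and a newer hw_tail
 *	       value can immediately begin aging in the other slot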
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
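/* For illustration (the values here are just examples): with a 256 byte
 * report format, the power-of-two size lets a raw hardware tail of 0x1030
 * be aligned with a simple mask, hw_tail &= ~(report_size - 1) == 0x1000,
 * while OA_TAKEN() handles buffer wrap-around, e.g.
 * OA_TAKEN(192, OA_BUFFER_SIZE - 64) == 256.
 */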
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT	(1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	if (metrics_set == 1)
		oa_config = &perf->test_config;
	else
		oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	STUB();
	return false;
#ifdef notyet
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset if there is currently
	 * a read() in progress).
	 */
	head = stream->oa_buffer.head;

	aged_idx = stream->oa_buffer.aged_tail_idx;
	aged_tail = stream->oa_buffer.tails[aged_idx].offset;
	aging_tail = stream->oa_buffer.tails[!aged_idx].offset;

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - stream->oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		stream->oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = stream->oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			stream->oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			stream->oa_buffer.aging_timestamp = now;
		} else {
			drm_err(&stream->perf->i915->drm,
				"Ignoring spurious out of range OA buffer tail pointer = %x\n",
				hw_tail);
		}
	}

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
	       false : OA_TAKEN(aged_tail, head) >= report_size;
#endif
}
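/*
 * For illustration, a userspace consumer of the record stream produced by the
 * append_* functions below might parse its read() buffer like this (a sketch;
 * the buffer size and what's done with each record are an application's
 * choice, not something the driver mandates):
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (size_t offset = 0; offset < (size_t)len; ) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		switch (header->type) {
 *		case DRM_I915_PERF_RECORD_SAMPLE:
 *			// header is followed by the requested sample
 *			// contents, e.g. a raw OA report
 *			break;
 *		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
 *		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
 *			// status-only records: no payload
 *			break;
 *		}
 *		offset += header->size;
 *	}
 */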

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		intel_uncore_write(uncore, oastatus_reg,
				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

#ifdef notyet
/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}
#endif
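/*
 * For illustration, a non-blocking userspace consumer would typically drive
 * the wait/read cycle above with poll() (a sketch; error handling and the
 * infinite timeout here are an application's choice, not part of the driver):
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) < 0)
 *			break;
 *		if (pfd.revents & POLLIN)
 *			consume_records(stream_fd); // read() loop shown earlier
 *	}
 */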

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	int err;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 */
		err = intel_context_pin(ce);
		if (err == 0) {
			stream->pinned_ctx = ce;
			break;
		}
	}
	i915_gem_context_unlock_engines(ctx);

	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (INTEL_GEN(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		} else {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		}
		break;

	case 11:
	case 12: {
		stream->specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		/*
		 * Pick an unused context id
		 * 0 - BITS_PER_LONG are used by other contexts
		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
		 */
		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		break;
	}

	default:
		MISSING_CASE(INTEL_GEN(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	STUB();
#ifdef notyet
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
#endif
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	stream->pollin = false;
}

static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
*/ 1513 stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1514 stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1515 1516 /* 1517 * Reset state used to recognise context switches, affecting which 1518 * reports we will forward to userspace while filtering for a single 1519 * context. 1520 */ 1521 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1522 1523 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1524 1525 /* 1526 * NB: although the OA buffer will initially be allocated 1527 * zeroed via shmfs (and so this memset is redundant when 1528 * first allocating), we may re-init the OA buffer, either 1529 * when re-enabling a stream or in error/reset paths. 1530 * 1531 * The reason we clear the buffer for each re-init is for the 1532 * sanity check in gen8_append_oa_reports() that looks at the 1533 * reason field to make sure it's non-zero which relies on 1534 * the assumption that new reports are being written to zeroed 1535 * memory... 1536 */ 1537 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1538 1539 stream->pollin = false; 1540 } 1541 1542 static void gen12_init_oa_buffer(struct i915_perf_stream *stream) 1543 { 1544 struct intel_uncore *uncore = stream->uncore; 1545 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1546 unsigned long flags; 1547 1548 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1549 1550 intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); 1551 intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, 1552 gtt_offset & GEN12_OAG_OAHEADPTR_MASK); 1553 stream->oa_buffer.head = gtt_offset; 1554 1555 /* 1556 * PRM says: 1557 * 1558 * "This MMIO must be set before the OATAILPTR 1559 * register and after the OAHEADPTR register. This is 1560 * to enable proper functionality of the overflow 1561 * bit." 1562 */ 1563 intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset | 1564 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1565 intel_uncore_write(uncore, GEN12_OAG_OATAILPTR, 1566 gtt_offset & GEN12_OAG_OATAILPTR_MASK); 1567 1568 /* Mark that we need updated tail pointers to read from... */ 1569 stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1570 stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1571 1572 /* 1573 * Reset state used to recognise context switches, affecting which 1574 * reports we will forward to userspace while filtering for a single 1575 * context. 1576 */ 1577 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1578 1579 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1580 1581 /* 1582 * NB: although the OA buffer will initially be allocated 1583 * zeroed via shmfs (and so this memset is redundant when 1584 * first allocating), we may re-init the OA buffer, either 1585 * when re-enabling a stream or in error/reset paths. 1586 * 1587 * The reason we clear the buffer for each re-init is for the 1588 * sanity check in gen8_append_oa_reports() that looks at the 1589 * reason field to make sure it's non-zero which relies on 1590 * the assumption that new reports are being written to zeroed 1591 * memory... 
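 *
 * (Note: unlike the fixed OA_BUFFER_SIZE memset in the gen7/gen8 paths
 * above, the gen12 path below clears the whole backing vma.)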
1592 */ 1593 memset(stream->oa_buffer.vaddr, 0, 1594 stream->oa_buffer.vma->size); 1595 1596 stream->pollin = false; 1597 } 1598 1599 static int alloc_oa_buffer(struct i915_perf_stream *stream) 1600 { 1601 struct drm_i915_private *i915 = stream->perf->i915; 1602 struct drm_i915_gem_object *bo; 1603 struct i915_vma *vma; 1604 int ret; 1605 1606 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)) 1607 return -ENODEV; 1608 1609 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1610 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1611 1612 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); 1613 if (IS_ERR(bo)) { 1614 drm_err(&i915->drm, "Failed to allocate OA buffer\n"); 1615 return PTR_ERR(bo); 1616 } 1617 1618 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); 1619 1620 /* PreHSW required 512K alignment, HSW requires 16M */ 1621 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1622 if (IS_ERR(vma)) { 1623 ret = PTR_ERR(vma); 1624 goto err_unref; 1625 } 1626 stream->oa_buffer.vma = vma; 1627 1628 stream->oa_buffer.vaddr = 1629 i915_gem_object_pin_map(bo, I915_MAP_WB); 1630 if (IS_ERR(stream->oa_buffer.vaddr)) { 1631 ret = PTR_ERR(stream->oa_buffer.vaddr); 1632 goto err_unpin; 1633 } 1634 1635 return 0; 1636 1637 err_unpin: 1638 __i915_vma_unpin(vma); 1639 1640 err_unref: 1641 i915_gem_object_put(bo); 1642 1643 stream->oa_buffer.vaddr = NULL; 1644 stream->oa_buffer.vma = NULL; 1645 1646 return ret; 1647 } 1648 1649 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, 1650 bool save, i915_reg_t reg, u32 offset, 1651 u32 dword_count) 1652 { 1653 u32 cmd; 1654 u32 d; 1655 1656 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; 1657 cmd |= MI_SRM_LRM_GLOBAL_GTT; 1658 if (INTEL_GEN(stream->perf->i915) >= 8) 1659 cmd++; 1660 1661 for (d = 0; d < dword_count; d++) { 1662 *cs++ = cmd; 1663 *cs++ = i915_mmio_reg_offset(reg) + 4 * d; 1664 *cs++ = intel_gt_scratch_offset(stream->engine->gt, 1665 offset) + 4 * d; 1666 *cs++ = 0; 1667 } 1668 1669 return cs; 1670 } 1671 1672 static int alloc_noa_wait(struct i915_perf_stream *stream) 1673 { 1674 struct drm_i915_private *i915 = stream->perf->i915; 1675 struct drm_i915_gem_object *bo; 1676 struct i915_vma *vma; 1677 const u64 delay_ticks = 0xffffffffffffffff - 1678 DIV64_U64_ROUND_UP( 1679 atomic64_read(&stream->perf->noa_programming_delay) * 1680 RUNTIME_INFO(i915)->cs_timestamp_frequency_khz, 1681 1000000ull); 1682 const u32 base = stream->engine->mmio_base; 1683 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x) 1684 u32 *batch, *ts0, *cs, *jump; 1685 int ret, i; 1686 enum { 1687 START_TS, 1688 NOW_TS, 1689 DELTA_TS, 1690 JUMP_PREDICATE, 1691 DELTA_TARGET, 1692 N_CS_GPR 1693 }; 1694 1695 bo = i915_gem_object_create_internal(i915, 4096); 1696 if (IS_ERR(bo)) { 1697 drm_err(&i915->drm, 1698 "Failed to allocate NOA wait batchbuffer\n"); 1699 return PTR_ERR(bo); 1700 } 1701 1702 /* 1703 * We pin in GGTT because we will jump into this buffer: multiple OA 1704 * config BOs will have a jump to this address, and it needs to stay 1705 * fixed for the lifetime of the i915/perf stream. 1706 */ 1707 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH); 1708 if (IS_ERR(vma)) { 1709 ret = PTR_ERR(vma); 1710 goto err_unref; 1711 } 1712 1713 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB); 1714 if (IS_ERR(batch)) { 1715 ret = PTR_ERR(batch); 1716 goto err_unpin; 1717 } 1718 1719 /* Save registers.
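 * Each CS_GPR is saved as two dwords and MI_PREDICATE_RESULT_1 as one,
 * all stashed in per-gt scratch (the INTEL_GT_SCRATCH_FIELD_PERF_*
 * fields), so that running this batch on the kernel context doesn't
 * corrupt its register state; everything is restored again below, just
 * before MI_BATCH_BUFFER_END.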
*/ 1720 for (i = 0; i < N_CS_GPR; i++) 1721 cs = save_restore_register( 1722 stream, cs, true /* save */, CS_GPR(i), 1723 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1724 cs = save_restore_register( 1725 stream, cs, true /* save */, MI_PREDICATE_RESULT_1, 1726 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1727 1728 /* First timestamp snapshot location. */ 1729 ts0 = cs; 1730 1731 /* 1732 * Initial snapshot of the timestamp register to implement the wait. 1733 * We work with 32b values, so clear out the top 32 bits of the 1734 * register because the ALU works on 64 bits. 1735 */ 1736 *cs++ = MI_LOAD_REGISTER_IMM(1); 1737 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4; 1738 *cs++ = 0; 1739 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1740 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1741 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)); 1742 1743 /* 1744 * This is the location we're going to jump back into until the 1745 * required amount of time has passed. 1746 */ 1747 jump = cs; 1748 1749 /* 1750 * Take another snapshot of the timestamp register. Take care to clear 1751 * out the top 32 bits of CS_GPR(NOW_TS) as we're using it for other 1752 * operations below. 1753 */ 1754 *cs++ = MI_LOAD_REGISTER_IMM(1); 1755 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4; 1756 *cs++ = 0; 1757 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1758 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1759 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)); 1760 1761 /* 1762 * Do a diff between the 2 timestamps and store the result into 1763 * CS_GPR(DELTA_TS), keeping the carry flag in CS_GPR(JUMP_PREDICATE). 1764 */ 1765 *cs++ = MI_MATH(5); 1766 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS)); 1767 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS)); 1768 *cs++ = MI_MATH_SUB; 1769 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU); 1770 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); 1771 1772 /* 1773 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the 1774 * timestamp has rolled over its 32 bits) into the predicate register 1775 * to be used for the predicated jump. 1776 */ 1777 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1778 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); 1779 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1); 1780 1781 /* Restart from the beginning if we had timestamps roll over. */ 1782 *cs++ = (INTEL_GEN(i915) < 8 ? 1783 MI_BATCH_BUFFER_START : 1784 MI_BATCH_BUFFER_START_GEN8) | 1785 MI_BATCH_PREDICATE; 1786 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4; 1787 *cs++ = 0; 1788 1789 /* 1790 * Now take the diff between the two previous timestamps and add it to: 1791 * ((1 << 64) - 1) - delay, i.e. the precomputed delay_ticks. 1792 * 1793 * When the Carry Flag contains 1 this means the elapsed time is 1794 * longer than the expected delay, and we can exit the wait loop. 1795 */ 1796 *cs++ = MI_LOAD_REGISTER_IMM(2); 1797 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)); 1798 *cs++ = lower_32_bits(delay_ticks); 1799 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4; 1800 *cs++ = upper_32_bits(delay_ticks); 1801 1802 *cs++ = MI_MATH(4); 1803 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS)); 1804 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET)); 1805 *cs++ = MI_MATH_ADD; 1806 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); 1807 1808 *cs++ = MI_ARB_CHECK; 1809 1810 /* 1811 * Transfer the result into the predicate register to be used for the 1812 * predicated jump.
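 *
 * Overall, the batch built above implements roughly the following wait
 * loop (a sketch only; ts32() stands for reading RING_TIMESTAMP and all
 * arithmetic is done by MI_MATH on 64b GPRs):
 *
 *   restart:
 *   start = ts32();
 *   do {
 *       now = ts32();
 *       if (now < start)            (32b timestamp rolled over)
 *           goto restart;
 *       delta = now - start;
 *   } while (delta + delay_ticks sets no carry, i.e. delta <= delay);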
1813 */ 1814 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1815 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); 1816 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1); 1817 1818 /* Predicate the jump. */ 1819 *cs++ = (INTEL_GEN(i915) < 8 ? 1820 MI_BATCH_BUFFER_START : 1821 MI_BATCH_BUFFER_START_GEN8) | 1822 MI_BATCH_PREDICATE; 1823 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4; 1824 *cs++ = 0; 1825 1826 /* Restore registers. */ 1827 for (i = 0; i < N_CS_GPR; i++) 1828 cs = save_restore_register( 1829 stream, cs, false /* restore */, CS_GPR(i), 1830 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1831 cs = save_restore_register( 1832 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1, 1833 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1834 1835 /* And return to the ring. */ 1836 *cs++ = MI_BATCH_BUFFER_END; 1837 1838 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); 1839 1840 i915_gem_object_flush_map(bo); 1841 i915_gem_object_unpin_map(bo); 1842 1843 stream->noa_wait = vma; 1844 return 0; 1845 1846 err_unpin: 1847 i915_vma_unpin_and_release(&vma, 0); 1848 err_unref: 1849 i915_gem_object_put(bo); 1850 return ret; 1851 } 1852 1853 static u32 *write_cs_mi_lri(u32 *cs, 1854 const struct i915_oa_reg *reg_data, 1855 u32 n_regs) 1856 { 1857 u32 i; 1858 1859 for (i = 0; i < n_regs; i++) { 1860 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { 1861 u32 n_lri = min_t(u32, 1862 n_regs - i, 1863 MI_LOAD_REGISTER_IMM_MAX_REGS); 1864 1865 *cs++ = MI_LOAD_REGISTER_IMM(n_lri); 1866 } 1867 *cs++ = i915_mmio_reg_offset(reg_data[i].addr); 1868 *cs++ = reg_data[i].value; 1869 } 1870 1871 return cs; 1872 } 1873 1874 static int num_lri_dwords(int num_regs) 1875 { 1876 int count = 0; 1877 1878 if (num_regs > 0) { 1879 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); 1880 count += num_regs * 2; 1881 } 1882 1883 return count; 1884 } 1885 1886 static struct i915_oa_config_bo * 1887 alloc_oa_config_buffer(struct i915_perf_stream *stream, 1888 struct i915_oa_config *oa_config) 1889 { 1890 struct drm_i915_gem_object *obj; 1891 struct i915_oa_config_bo *oa_bo; 1892 size_t config_length = 0; 1893 u32 *cs; 1894 int err; 1895 1896 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); 1897 if (!oa_bo) 1898 return ERR_PTR(-ENOMEM); 1899 1900 config_length += num_lri_dwords(oa_config->mux_regs_len); 1901 config_length += num_lri_dwords(oa_config->b_counter_regs_len); 1902 config_length += num_lri_dwords(oa_config->flex_regs_len); 1903 config_length += 3; /* MI_BATCH_BUFFER_START */ 1904 config_length = roundup2(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); 1905 1906 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); 1907 if (IS_ERR(obj)) { 1908 err = PTR_ERR(obj); 1909 goto err_free; 1910 } 1911 1912 cs = i915_gem_object_pin_map(obj, I915_MAP_WB); 1913 if (IS_ERR(cs)) { 1914 err = PTR_ERR(cs); 1915 goto err_oa_bo; 1916 } 1917 1918 cs = write_cs_mi_lri(cs, 1919 oa_config->mux_regs, 1920 oa_config->mux_regs_len); 1921 cs = write_cs_mi_lri(cs, 1922 oa_config->b_counter_regs, 1923 oa_config->b_counter_regs_len); 1924 cs = write_cs_mi_lri(cs, 1925 oa_config->flex_regs, 1926 oa_config->flex_regs_len); 1927 1928 /* Jump into the active wait. */ 1929 *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ? 
1930 MI_BATCH_BUFFER_START : 1931 MI_BATCH_BUFFER_START_GEN8); 1932 *cs++ = i915_ggtt_offset(stream->noa_wait); 1933 *cs++ = 0; 1934 1935 i915_gem_object_flush_map(obj); 1936 i915_gem_object_unpin_map(obj); 1937 1938 oa_bo->vma = i915_vma_instance(obj, 1939 &stream->engine->gt->ggtt->vm, 1940 NULL); 1941 if (IS_ERR(oa_bo->vma)) { 1942 err = PTR_ERR(oa_bo->vma); 1943 goto err_oa_bo; 1944 } 1945 1946 oa_bo->oa_config = i915_oa_config_get(oa_config); 1947 llist_add(&oa_bo->node, &stream->oa_config_bos); 1948 1949 return oa_bo; 1950 1951 err_oa_bo: 1952 i915_gem_object_put(obj); 1953 err_free: 1954 kfree(oa_bo); 1955 return ERR_PTR(err); 1956 } 1957 1958 static struct i915_vma * 1959 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) 1960 { 1961 struct i915_oa_config_bo *oa_bo; 1962 1963 /* 1964 * Look for the buffer in the already allocated BOs attached 1965 * to the stream. 1966 */ 1967 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { 1968 if (oa_bo->oa_config == oa_config && 1969 memcmp(oa_bo->oa_config->uuid, 1970 oa_config->uuid, 1971 sizeof(oa_config->uuid)) == 0) 1972 goto out; 1973 } 1974 1975 oa_bo = alloc_oa_config_buffer(stream, oa_config); 1976 if (IS_ERR(oa_bo)) 1977 return ERR_CAST(oa_bo); 1978 1979 out: 1980 return i915_vma_get(oa_bo->vma); 1981 } 1982 1983 static struct i915_request * 1984 emit_oa_config(struct i915_perf_stream *stream, 1985 struct i915_oa_config *oa_config, 1986 struct intel_context *ce) 1987 { 1988 struct i915_request *rq; 1989 struct i915_vma *vma; 1990 int err; 1991 1992 vma = get_oa_vma(stream, oa_config); 1993 if (IS_ERR(vma)) 1994 return ERR_CAST(vma); 1995 1996 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 1997 if (err) 1998 goto err_vma_put; 1999 2000 intel_engine_pm_get(ce->engine); 2001 rq = i915_request_create(ce); 2002 intel_engine_pm_put(ce->engine); 2003 if (IS_ERR(rq)) { 2004 err = PTR_ERR(rq); 2005 goto err_vma_unpin; 2006 } 2007 2008 i915_vma_lock(vma); 2009 err = i915_request_await_object(rq, vma->obj, 0); 2010 if (!err) 2011 err = i915_vma_move_to_active(vma, rq, 0); 2012 i915_vma_unlock(vma); 2013 if (err) 2014 goto err_add_request; 2015 2016 err = rq->engine->emit_bb_start(rq, 2017 vma->node.start, 0, 2018 I915_DISPATCH_SECURE); 2019 if (err) 2020 goto err_add_request; 2021 2022 i915_request_get(rq); 2023 err_add_request: 2024 i915_request_add(rq); 2025 err_vma_unpin: 2026 i915_vma_unpin(vma); 2027 err_vma_put: 2028 i915_vma_put(vma); 2029 return err ? ERR_PTR(err) : rq; 2030 } 2031 2032 static struct intel_context *oa_context(struct i915_perf_stream *stream) 2033 { 2034 return stream->pinned_ctx ?: stream->engine->kernel_context; 2035 } 2036 2037 static struct i915_request * 2038 hsw_enable_metric_set(struct i915_perf_stream *stream) 2039 { 2040 struct intel_uncore *uncore = stream->uncore; 2041 2042 /* 2043 * PRM: 2044 * 2045 * OA unit is using “crclk” for its functionality. When trunk 2046 * level clock gating takes place, OA clock would be gated, 2047 * unable to count the events from non-render clock domain. 2048 * Render clock gating must be disabled when OA is enabled to 2049 * count the events from non-render domain. Unit level clock 2050 * gating for RCS should also be disabled. 
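 *
 * The two RMWs just below implement exactly that: DOP clock gating is
 * cleared in GEN7_MISCCPCTL and CS unit clock gating is disabled in
 * GEN6_UCGCTL1; hsw_disable_metric_set() reverses both.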
2051 */ 2052 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2053 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 2054 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2055 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 2056 2057 return emit_oa_config(stream, stream->oa_config, oa_context(stream)); 2058 } 2059 2060 static void hsw_disable_metric_set(struct i915_perf_stream *stream) 2061 { 2062 struct intel_uncore *uncore = stream->uncore; 2063 2064 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2065 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); 2066 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2067 0, GEN7_DOP_CLOCK_GATE_ENABLE); 2068 2069 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2070 } 2071 2072 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, 2073 i915_reg_t reg) 2074 { 2075 u32 mmio = i915_mmio_reg_offset(reg); 2076 int i; 2077 2078 /* 2079 * This arbitrary default will select the 'EU FPU0 Pipeline 2080 * Active' event. In the future it's anticipated that there 2081 * will be an explicit 'No Event' we can select, but not yet... 2082 */ 2083 if (!oa_config) 2084 return 0; 2085 2086 for (i = 0; i < oa_config->flex_regs_len; i++) { 2087 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) 2088 return oa_config->flex_regs[i].value; 2089 } 2090 2091 return 0; 2092 } 2093 /* 2094 * NB: It must always remain pointer safe to run this even if the OA unit 2095 * has been disabled. 2096 * 2097 * It's fine to put out-of-date values into these per-context registers 2098 * in the case that the OA unit has been disabled. 2099 */ 2100 static void 2101 gen8_update_reg_state_unlocked(const struct intel_context *ce, 2102 const struct i915_perf_stream *stream) 2103 { 2104 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2105 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2106 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2107 i915_reg_t flex_regs[] = { 2108 EU_PERF_CNTL0, 2109 EU_PERF_CNTL1, 2110 EU_PERF_CNTL2, 2111 EU_PERF_CNTL3, 2112 EU_PERF_CNTL4, 2113 EU_PERF_CNTL5, 2114 EU_PERF_CNTL6, 2115 }; 2116 u32 *reg_state = ce->lrc_reg_state; 2117 int i; 2118 2119 reg_state[ctx_oactxctrl + 1] = 2120 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2121 (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | 2122 GEN8_OA_COUNTER_RESUME; 2123 2124 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) 2125 reg_state[ctx_flexeu0 + i * 2 + 1] = 2126 oa_config_flex_reg(stream->oa_config, flex_regs[i]); 2127 2128 reg_state[CTX_R_PWR_CLK_STATE] = 2129 intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu); 2130 } 2131 2132 struct flex { 2133 i915_reg_t reg; 2134 u32 offset; 2135 u32 value; 2136 }; 2137 2138 static int 2139 gen8_store_flex(struct i915_request *rq, 2140 struct intel_context *ce, 2141 const struct flex *flex, unsigned int count) 2142 { 2143 u32 offset; 2144 u32 *cs; 2145 2146 cs = intel_ring_begin(rq, 4 * count); 2147 if (IS_ERR(cs)) 2148 return PTR_ERR(cs); 2149 2150 offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; 2151 do { 2152 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 2153 *cs++ = offset + flex->offset * sizeof(u32); 2154 *cs++ = 0; 2155 *cs++ = flex->value; 2156 } while (flex++, --count); 2157 2158 intel_ring_advance(rq, cs); 2159 2160 return 0; 2161 } 2162 2163 static int 2164 gen8_load_flex(struct i915_request *rq, 2165 struct intel_context *ce, 2166 const struct flex *flex, unsigned int count) 2167 { 2168 u32 *cs; 2169 2170 GEM_BUG_ON(!count || count > 63); 2171 2172 cs = intel_ring_begin(rq, 2 * count + 2); 2173 if (IS_ERR(cs)) 2174 return PTR_ERR(cs); 2175 2176 *cs++ = MI_LOAD_REGISTER_IMM(count); 2177 do { 2178 *cs++ = i915_mmio_reg_offset(flex->reg); 2179 *cs++ = flex->value; 2180 } while (flex++, --count); 2181 *cs++ = MI_NOOP; 2182 2183 intel_ring_advance(rq, cs); 2184 2185 return 0; 2186 } 2187 2188 static int gen8_modify_context(struct intel_context *ce, 2189 const struct flex *flex, unsigned int count) 2190 { 2191 struct i915_request *rq; 2192 int err; 2193 2194 rq = intel_engine_create_kernel_request(ce->engine); 2195 if (IS_ERR(rq)) 2196 return PTR_ERR(rq); 2197 2198 /* Serialise with the remote context */ 2199 err = intel_context_prepare_remote_request(ce, rq); 2200 if (err == 0) 2201 err = gen8_store_flex(rq, ce, flex, count); 2202 2203 i915_request_add(rq); 2204 return err; 2205 } 2206 2207 static int gen8_modify_self(struct intel_context *ce, 2208 const struct flex *flex, unsigned int count) 2209 { 2210 struct i915_request *rq; 2211 int err; 2212 2213 intel_engine_pm_get(ce->engine); 2214 rq = i915_request_create(ce); 2215 intel_engine_pm_put(ce->engine); 2216 if (IS_ERR(rq)) 2217 return PTR_ERR(rq); 2218 2219 err = gen8_load_flex(rq, ce, flex, count); 2220 2221 i915_request_add(rq); 2222 return err; 2223 } 2224 2225 static int gen8_configure_context(struct i915_gem_context *ctx, 2226 struct flex *flex, unsigned int count) 2227 { 2228 struct i915_gem_engines_iter it; 2229 struct intel_context *ce; 2230 int err = 0; 2231 2232 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2233 GEM_BUG_ON(ce == ce->engine->kernel_context); 2234 2235 if (ce->engine->class != RENDER_CLASS) 2236 continue; 2237 2238 /* Otherwise OA settings will be set upon first use */ 2239 if (!intel_context_pin_if_active(ce)) 2240 continue; 2241 2242 flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); 2243 err = gen8_modify_context(ce, flex, count); 2244 2245 intel_context_unpin(ce); 2246 if (err) 2247 break; 2248 } 2249 i915_gem_context_unlock_engines(ctx); 2250 2251 return err; 2252 } 2253 2254 static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable) 2255 { 2256 int err; 2257 struct intel_context *ce = stream->pinned_ctx; 2258 u32 format = stream->oa_buffer.format; 2259 struct flex regs_context[] = { 2260 { 2261 
GEN8_OACTXCONTROL, 2262 stream->perf->ctx_oactxctrl_offset + 1, 2263 enable ? GEN8_OA_COUNTER_RESUME : 0, 2264 }, 2265 }; 2266 /* Offsets in regs_lri are not used since this configuration is only 2267 * applied using LRI. Initialize the correct offsets for posterity. 2268 */ 2269 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0 2270 struct flex regs_lri[] = { 2271 { 2272 GEN12_OAR_OACONTROL, 2273 GEN12_OAR_OACONTROL_OFFSET + 1, 2274 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | 2275 (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) 2276 }, 2277 { 2278 RING_CONTEXT_CONTROL(ce->engine->mmio_base), 2279 CTX_CONTEXT_CONTROL, 2280 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, 2281 enable ? 2282 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 2283 0) 2284 }, 2285 }; 2286 2287 /* Modify the context image of pinned context with regs_context */ 2288 err = intel_context_lock_pinned(ce); 2289 if (err) 2290 return err; 2291 2292 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)); 2293 intel_context_unlock_pinned(ce); 2294 if (err) 2295 return err; 2296 2297 /* Apply regs_lri using LRI with pinned context */ 2298 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri)); 2299 } 2300 2301 /* 2302 * Manages updating the per-context aspects of the OA stream 2303 * configuration across all contexts. 2304 * 2305 * The awkward consideration here is that OACTXCONTROL controls the 2306 * exponent for periodic sampling, which is primarily used for system 2307 * wide profiling where we'd like a consistent sampling period even in 2308 * the face of context switches. 2309 * 2310 * Our approach of updating the register state context (as opposed to 2311 * say using a workaround batch buffer) ensures that the hardware 2312 * won't automatically reload an out-of-date timer exponent even 2313 * transiently before a WA BB could be parsed. 2314 * 2315 * This function needs to: 2316 * - Ensure the currently running context's per-context OA state is 2317 * updated 2318 * - Ensure that all existing contexts will have the correct per-context 2319 * OA state if they are scheduled for use. 2320 * - Ensure any new contexts will be initialized with the correct 2321 * per-context OA state. 2322 * 2323 * Note: it's only the RCS/Render context that has any OA state. 2324 * Note: the first flex register passed must always be R_PWR_CLK_STATE 2325 */ 2326 static int oa_configure_all_contexts(struct i915_perf_stream *stream, 2327 struct flex *regs, 2328 size_t num_regs) 2329 { 2330 struct drm_i915_private *i915 = stream->perf->i915; 2331 struct intel_engine_cs *engine; 2332 struct i915_gem_context *ctx, *cn; 2333 int err; 2334 2335 lockdep_assert_held(&stream->perf->lock); 2336 2337 /* 2338 * The OA register config is set up through the context image. This image 2339 * might be written to by the GPU on context switch (in particular on 2340 * lite-restore). This means we can't safely update a context's image 2341 * if this context is scheduled/submitted to run on the GPU. 2342 * 2343 * We could emit the OA register config through the batch buffer but 2344 * this might leave a small interval of time where the OA unit is 2345 * configured at an invalid sampling period. 2346 * 2347 * Note that since we emit all requests from a single ring, there 2348 * is still an implicit global barrier here that may cause a high 2349 * priority context to wait for an otherwise independent low priority 2350 * context. Contexts idle at the time of reconfiguration are not 2351 * trapped behind the barrier.
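 *
 * The list walk below therefore drops gem.contexts.lock around each
 * gen8_configure_context() call (which can sleep), keeping the context
 * alive with kref_get_unless_zero() and revalidating the cursor with
 * list_safe_reset_next() once the lock is retaken.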
2352 */ 2353 spin_lock(&i915->gem.contexts.lock); 2354 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 2355 if (!kref_get_unless_zero(&ctx->ref)) 2356 continue; 2357 2358 spin_unlock(&i915->gem.contexts.lock); 2359 2360 err = gen8_configure_context(ctx, regs, num_regs); 2361 if (err) { 2362 i915_gem_context_put(ctx); 2363 return err; 2364 } 2365 2366 spin_lock(&i915->gem.contexts.lock); 2367 list_safe_reset_next(ctx, cn, link); 2368 i915_gem_context_put(ctx); 2369 } 2370 spin_unlock(&i915->gem.contexts.lock); 2371 2372 /* 2373 * After updating all other contexts, we need to modify ourselves. 2374 * If we don't modify the kernel_context, we do not get events while 2375 * idle. 2376 */ 2377 for_each_uabi_engine(engine, i915) { 2378 struct intel_context *ce = engine->kernel_context; 2379 2380 if (engine->class != RENDER_CLASS) 2381 continue; 2382 2383 regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); 2384 2385 err = gen8_modify_self(ce, regs, num_regs); 2386 if (err) 2387 return err; 2388 } 2389 2390 return 0; 2391 } 2392 2393 static int gen12_configure_all_contexts(struct i915_perf_stream *stream, 2394 const struct i915_oa_config *oa_config) 2395 { 2396 struct flex regs[] = { 2397 { 2398 GEN8_R_PWR_CLK_STATE, 2399 CTX_R_PWR_CLK_STATE, 2400 }, 2401 }; 2402 2403 return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); 2404 } 2405 2406 static int lrc_configure_all_contexts(struct i915_perf_stream *stream, 2407 const struct i915_oa_config *oa_config) 2408 { 2409 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2410 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2411 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) 2412 struct flex regs[] = { 2413 { 2414 GEN8_R_PWR_CLK_STATE, 2415 CTX_R_PWR_CLK_STATE, 2416 }, 2417 { 2418 GEN8_OACTXCONTROL, 2419 stream->perf->ctx_oactxctrl_offset + 1, 2420 }, 2421 { EU_PERF_CNTL0, ctx_flexeuN(0) }, 2422 { EU_PERF_CNTL1, ctx_flexeuN(1) }, 2423 { EU_PERF_CNTL2, ctx_flexeuN(2) }, 2424 { EU_PERF_CNTL3, ctx_flexeuN(3) }, 2425 { EU_PERF_CNTL4, ctx_flexeuN(4) }, 2426 { EU_PERF_CNTL5, ctx_flexeuN(5) }, 2427 { EU_PERF_CNTL6, ctx_flexeuN(6) }, 2428 }; 2429 #undef ctx_flexeuN 2430 int i; 2431 2432 regs[1].value = 2433 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2434 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2435 GEN8_OA_COUNTER_RESUME; 2436 2437 for (i = 2; i < ARRAY_SIZE(regs); i++) 2438 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); 2439 2440 return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); 2441 } 2442 2443 static struct i915_request * 2444 gen8_enable_metric_set(struct i915_perf_stream *stream) 2445 { 2446 struct intel_uncore *uncore = stream->uncore; 2447 struct i915_oa_config *oa_config = stream->oa_config; 2448 int ret; 2449 2450 /* 2451 * We disable slice/unslice clock ratio change reports on SKL since 2452 * they are too noisy. The HW generates a lot of redundant reports 2453 * where the ratio hasn't really changed, causing a lot of redundant 2454 * work for processes and increasing the chances we'll hit buffer 2455 * overruns. 2456 * 2457 * Although we don't currently use the 'disable overrun' OABUFFER 2458 * feature, it's worth noting that clock ratio reports have to be 2459 * disabled before considering using that feature, since the HW doesn't 2460 * correctly block these reports. 2461 * 2462 * Currently none of the high-level metrics we have depend on knowing 2463 * this ratio to normalize.
2464 * 2465 * Note: This register is not power context saved and restored, but 2466 * that's OK considering that we disable RC6 while the OA unit is 2467 * enabled. 2468 * 2469 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 2470 * be read back from automatically triggered reports, as part of the 2471 * RPT_ID field. 2472 */ 2473 if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) { 2474 intel_uncore_write(uncore, GEN8_OA_DEBUG, 2475 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2476 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 2477 } 2478 2479 /* 2480 * Update all contexts prior to writing the mux configurations as we need 2481 * to make sure all slices/subslices are ON before writing to NOA 2482 * registers. 2483 */ 2484 ret = lrc_configure_all_contexts(stream, oa_config); 2485 if (ret) 2486 return ERR_PTR(ret); 2487 2488 return emit_oa_config(stream, oa_config, oa_context(stream)); 2489 } 2490 2491 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) 2492 { 2493 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS, 2494 (stream->sample_flags & SAMPLE_OA_REPORT) ? 2495 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); 2496 } 2497 2498 static struct i915_request * 2499 gen12_enable_metric_set(struct i915_perf_stream *stream) 2500 { 2501 struct intel_uncore *uncore = stream->uncore; 2502 struct i915_oa_config *oa_config = stream->oa_config; 2503 bool periodic = stream->periodic; 2504 u32 period_exponent = stream->period_exponent; 2505 int ret; 2506 2507 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG, 2508 /* Disable clk ratio reports, like previous Gens. */ 2509 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2510 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) | 2511 /* 2512 * If the user didn't require OA reports, instruct 2513 * the hardware not to emit ctx switch reports. 2514 */ 2515 oag_report_ctx_switches(stream)); 2516 2517 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ? 2518 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME | 2519 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE | 2520 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT)) 2521 : 0); 2522 2523 /* 2524 * Update all contexts prior to writing the mux configurations as we need 2525 * to make sure all slices/subslices are ON before writing to NOA 2526 * registers. 2527 */ 2528 ret = gen12_configure_all_contexts(stream, oa_config); 2529 if (ret) 2530 return ERR_PTR(ret); 2531 2532 /* 2533 * For Gen12, performance counters are context 2534 * saved/restored. Only enable it for the context that 2535 * requested this. 2536 */ 2537 if (stream->ctx) { 2538 ret = gen12_configure_oar_context(stream, true); 2539 if (ret) 2540 return ERR_PTR(ret); 2541 } 2542 2543 return emit_oa_config(stream, oa_config, oa_context(stream)); 2544 } 2545 2546 static void gen8_disable_metric_set(struct i915_perf_stream *stream) 2547 { 2548 struct intel_uncore *uncore = stream->uncore; 2549 2550 /* Reset all contexts' slices/subslices configurations. */ 2551 lrc_configure_all_contexts(stream, NULL); 2552 2553 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2554 } 2555 2556 static void gen10_disable_metric_set(struct i915_perf_stream *stream) 2557 { 2558 struct intel_uncore *uncore = stream->uncore; 2559 2560 /* Reset all contexts' slices/subslices configurations. */ 2561 lrc_configure_all_contexts(stream, NULL); 2562 2563 /* Make sure we disable noa to save power.
*/ 2564 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2565 } 2566 2567 static void gen12_disable_metric_set(struct i915_perf_stream *stream) 2568 { 2569 struct intel_uncore *uncore = stream->uncore; 2570 2571 /* Reset all contexts' slices/subslices configurations. */ 2572 gen12_configure_all_contexts(stream, NULL); 2573 2574 /* disable the context save/restore or OAR counters */ 2575 if (stream->ctx) 2576 gen12_configure_oar_context(stream, false); 2577 2578 /* Make sure we disable noa to save power. */ 2579 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2580 } 2581 2582 static void gen7_oa_enable(struct i915_perf_stream *stream) 2583 { 2584 struct intel_uncore *uncore = stream->uncore; 2585 struct i915_gem_context *ctx = stream->ctx; 2586 u32 ctx_id = stream->specific_ctx_id; 2587 bool periodic = stream->periodic; 2588 u32 period_exponent = stream->period_exponent; 2589 u32 report_format = stream->oa_buffer.format; 2590 2591 /* 2592 * Reset buf pointers so we don't forward reports from before now. 2593 * 2594 * Think carefully if considering trying to avoid this, since it 2595 * also ensures status flags and the buffer itself are cleared 2596 * in error paths, and we have checks for invalid reports based 2597 * on the assumption that certain fields are written to zeroed 2598 * memory, which this helps maintain. 2599 */ 2600 gen7_init_oa_buffer(stream); 2601 2602 intel_uncore_write(uncore, GEN7_OACONTROL, 2603 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 2604 (period_exponent << 2605 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 2606 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 2607 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 2608 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 2609 GEN7_OACONTROL_ENABLE); 2610 } 2611 2612 static void gen8_oa_enable(struct i915_perf_stream *stream) 2613 { 2614 struct intel_uncore *uncore = stream->uncore; 2615 u32 report_format = stream->oa_buffer.format; 2616 2617 /* 2618 * Reset buf pointers so we don't forward reports from before now. 2619 * 2620 * Think carefully if considering trying to avoid this, since it 2621 * also ensures status flags and the buffer itself are cleared 2622 * in error paths, and we have checks for invalid reports based 2623 * on the assumption that certain fields are written to zeroed 2624 * memory, which this helps maintain. 2625 */ 2626 gen8_init_oa_buffer(stream); 2627 2628 /* 2629 * Note: we don't rely on the hardware to perform single context 2630 * filtering and instead filter on the cpu based on the context-id 2631 * field of reports 2632 */ 2633 intel_uncore_write(uncore, GEN8_OACONTROL, 2634 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) | 2635 GEN8_OA_COUNTER_ENABLE); 2636 } 2637 2638 static void gen12_oa_enable(struct i915_perf_stream *stream) 2639 { 2640 struct intel_uncore *uncore = stream->uncore; 2641 u32 report_format = stream->oa_buffer.format; 2642 2643 /* 2644 * If we don't want OA reports from the OA buffer, then we don't even 2645 * need to program the OAG unit.
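 *
 * (Per-context, query-style metrics on Gen12 are handled by the OAR
 * unit instead, configured via gen12_configure_oar_context(), so such
 * streams don't depend on the OAG programming done here.)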
2646 */ 2647 if (!(stream->sample_flags & SAMPLE_OA_REPORT)) 2648 return; 2649 2650 gen12_init_oa_buffer(stream); 2651 2652 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 2653 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | 2654 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); 2655 } 2656 2657 /** 2658 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 2659 * @stream: An i915 perf stream opened for OA metrics 2660 * 2661 * [Re]enables hardware periodic sampling according to the period configured 2662 * when opening the stream. This also starts a hrtimer that will periodically 2663 * check for data in the circular OA buffer for notifying userspace (e.g. 2664 * during a read() or poll()). 2665 */ 2666 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 2667 { 2668 STUB(); 2669 #ifdef notyet 2670 stream->perf->ops.oa_enable(stream); 2671 2672 if (stream->periodic) 2673 hrtimer_start(&stream->poll_check_timer, 2674 ns_to_ktime(POLL_PERIOD), 2675 HRTIMER_MODE_REL_PINNED); 2676 #endif 2677 } 2678 2679 static void gen7_oa_disable(struct i915_perf_stream *stream) 2680 { 2681 struct intel_uncore *uncore = stream->uncore; 2682 2683 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 2684 if (intel_wait_for_register(uncore, 2685 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 2686 50)) 2687 drm_err(&stream->perf->i915->drm, 2688 "wait for OA to be disabled timed out\n"); 2689 } 2690 2691 static void gen8_oa_disable(struct i915_perf_stream *stream) 2692 { 2693 struct intel_uncore *uncore = stream->uncore; 2694 2695 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 2696 if (intel_wait_for_register(uncore, 2697 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 2698 50)) 2699 drm_err(&stream->perf->i915->drm, 2700 "wait for OA to be disabled timed out\n"); 2701 } 2702 2703 static void gen12_oa_disable(struct i915_perf_stream *stream) 2704 { 2705 struct intel_uncore *uncore = stream->uncore; 2706 2707 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); 2708 if (intel_wait_for_register(uncore, 2709 GEN12_OAG_OACONTROL, 2710 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 2711 50)) 2712 drm_err(&stream->perf->i915->drm, 2713 "wait for OA to be disabled timed out\n"); 2714 2715 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1); 2716 if (intel_wait_for_register(uncore, 2717 GEN12_OA_TLB_INV_CR, 2718 1, 0, 2719 50)) 2720 drm_err(&stream->perf->i915->drm, 2721 "wait for OA tlb invalidate timed out\n"); 2722 } 2723 2724 /** 2725 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 2726 * @stream: An i915 perf stream opened for OA metrics 2727 * 2728 * Stops the OA unit from periodically writing counter reports into the 2729 * circular OA buffer. This also stops the hrtimer that periodically checks for 2730 * data in the circular OA buffer, for notifying userspace. 
2731 */ 2732 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 2733 { 2734 STUB(); 2735 #ifdef notyet 2736 stream->perf->ops.oa_disable(stream); 2737 2738 if (stream->periodic) 2739 hrtimer_cancel(&stream->poll_check_timer); 2740 #endif 2741 } 2742 2743 #ifdef notyet 2744 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 2745 .destroy = i915_oa_stream_destroy, 2746 .enable = i915_oa_stream_enable, 2747 .disable = i915_oa_stream_disable, 2748 .wait_unlocked = i915_oa_wait_unlocked, 2749 .poll_wait = i915_oa_poll_wait, 2750 .read = i915_oa_read, 2751 }; 2752 #endif 2753 2754 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) 2755 { 2756 struct i915_request *rq; 2757 2758 rq = stream->perf->ops.enable_metric_set(stream); 2759 if (IS_ERR(rq)) 2760 return PTR_ERR(rq); 2761 2762 i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); 2763 i915_request_put(rq); 2764 2765 return 0; 2766 } 2767 2768 /** 2769 * i915_oa_stream_init - validate combined props for OA stream and init 2770 * @stream: An i915 perf stream 2771 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2772 * @props: The property state that configures stream (individually validated) 2773 * 2774 * While read_properties_unlocked() validates properties in isolation, it 2775 * doesn't ensure that the combination necessarily makes sense. 2776 * 2777 * At this point it has been determined that userspace wants a stream of 2778 * OA metrics, but we still need to further validate that the combined 2779 * properties are OK. 2780 * 2781 * If the configuration makes sense then we can allocate memory for 2782 * a circular OA buffer and apply the requested metric set configuration. 2783 * 2784 * Returns: zero on success or a negative error code. 2785 */ 2786 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2787 struct drm_i915_perf_open_param *param, 2788 struct perf_open_properties *props) 2789 { 2790 STUB(); 2791 return -ENOSYS; 2792 #ifdef notyet 2793 struct drm_i915_private *i915 = stream->perf->i915; 2794 struct i915_perf *perf = stream->perf; 2795 int format_size; 2796 int ret; 2797 2798 if (!props->engine) { 2799 DRM_DEBUG("OA engine not specified\n"); 2800 return -EINVAL; 2801 } 2802 2803 /* 2804 * If the sysfs metrics/ directory wasn't registered for some 2805 * reason then don't let userspace try their luck with config 2806 * IDs 2807 */ 2808 if (!perf->metrics_kobj) { 2809 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 2810 return -EINVAL; 2811 } 2812 2813 if (!(props->sample_flags & SAMPLE_OA_REPORT) && 2814 (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) { 2815 DRM_DEBUG("Only OA report sampling supported\n"); 2816 return -EINVAL; 2817 } 2818 2819 if (!perf->ops.enable_metric_set) { 2820 DRM_DEBUG("OA unit not supported\n"); 2821 return -ENODEV; 2822 } 2823 2824 /* 2825 * To avoid the complexity of having to accurately filter 2826 * counter reports and marshal to the appropriate client 2827 * we currently only allow exclusive access 2828 */ 2829 if (perf->exclusive_stream) { 2830 DRM_DEBUG("OA unit already in use\n"); 2831 return -EBUSY; 2832 } 2833 2834 if (!props->oa_format) { 2835 DRM_DEBUG("OA report format not specified\n"); 2836 return -EINVAL; 2837 } 2838 2839 stream->engine = props->engine; 2840 stream->uncore = stream->engine->gt->uncore; 2841 2842 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2843 2844 format_size = perf->oa_formats[props->oa_format].size; 2845 2846 stream->sample_flags = props->sample_flags; 2847 stream->sample_size
+= format_size; 2848 2849 stream->oa_buffer.format_size = format_size; 2850 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0)) 2851 return -EINVAL; 2852 2853 stream->hold_preemption = props->hold_preemption; 2854 2855 stream->oa_buffer.format = 2856 perf->oa_formats[props->oa_format].format; 2857 2858 stream->periodic = props->oa_periodic; 2859 if (stream->periodic) 2860 stream->period_exponent = props->oa_period_exponent; 2861 2862 if (stream->ctx) { 2863 ret = oa_get_render_ctx_id(stream); 2864 if (ret) { 2865 DRM_DEBUG("Invalid context id to filter with\n"); 2866 return ret; 2867 } 2868 } 2869 2870 ret = alloc_noa_wait(stream); 2871 if (ret) { 2872 DRM_DEBUG("Unable to allocate NOA wait batch buffer\n"); 2873 goto err_noa_wait_alloc; 2874 } 2875 2876 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); 2877 if (!stream->oa_config) { 2878 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); 2879 ret = -EINVAL; 2880 goto err_config; 2881 } 2882 2883 /* PRM - observability performance counters: 2884 * 2885 * OACONTROL, performance counter enable, note: 2886 * 2887 * "When this bit is set, in order to have coherent counts, 2888 * RC6 power state and trunk clock gating must be disabled. 2889 * This can be achieved by programming MMIO registers as 2890 * 0xA094=0 and 0xA090[31]=1" 2891 * 2892 * In our case we are expecting that taking pm + FORCEWAKE 2893 * references will effectively disable RC6. 2894 */ 2895 intel_engine_pm_get(stream->engine); 2896 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); 2897 2898 ret = alloc_oa_buffer(stream); 2899 if (ret) 2900 goto err_oa_buf_alloc; 2901 2902 stream->ops = &i915_oa_stream_ops; 2903 WRITE_ONCE(perf->exclusive_stream, stream); 2904 2905 ret = i915_perf_stream_enable_sync(stream); 2906 if (ret) { 2907 DRM_DEBUG("Unable to enable metric set\n"); 2908 goto err_enable; 2909 } 2910 2911 DRM_DEBUG("opening stream oa config uuid=%s\n", 2912 stream->oa_config->uuid); 2913 2914 hrtimer_init(&stream->poll_check_timer, 2915 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2916 stream->poll_check_timer.function = oa_poll_check_timer_cb; 2917 init_waitqueue_head(&stream->poll_wq); 2918 mtx_init(&stream->oa_buffer.ptr_lock, IPL_TTY); 2919 2920 return 0; 2921 2922 err_enable: 2923 WRITE_ONCE(perf->exclusive_stream, NULL); 2924 perf->ops.disable_metric_set(stream); 2925 2926 free_oa_buffer(stream); 2927 2928 err_oa_buf_alloc: 2929 free_oa_configs(stream); 2930 2931 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 2932 intel_engine_pm_put(stream->engine); 2933 2934 err_config: 2935 free_noa_wait(stream); 2936 2937 err_noa_wait_alloc: 2938 if (stream->ctx) 2939 oa_put_render_ctx_id(stream); 2940 2941 return ret; 2942 #endif 2943 } 2944 2945 void i915_oa_init_reg_state(const struct intel_context *ce, 2946 const struct intel_engine_cs *engine) 2947 { 2948 struct i915_perf_stream *stream; 2949 2950 if (engine->class != RENDER_CLASS) 2951 return; 2952 2953 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ 2954 stream = READ_ONCE(engine->i915->perf.exclusive_stream); 2955 /* 2956 * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller 2957 * is already doing that, so nothing to be done for gen12 here. 
2958 */ 2959 if (stream && INTEL_GEN(stream->perf->i915) < 12) 2960 gen8_update_reg_state_unlocked(ce, stream); 2961 } 2962 2963 /** 2964 * i915_perf_read - handles read() FOP for i915 perf stream FDs 2965 * @file: An i915 perf stream file 2966 * @buf: destination buffer given by userspace 2967 * @count: the number of bytes userspace wants to read 2968 * @ppos: (inout) file seek position (unused) 2969 * 2970 * The entry point for handling a read() on a stream file descriptor from 2971 * userspace. Most of the work is left to i915_perf_read_locked() and 2972 * &i915_perf_stream_ops->read, but to save stream implementations (of 2973 * which we might have multiple later) from each handling blocking reads, 2974 * we handle them here. 2975 * 2976 * We can also consistently treat trying to read from a disabled stream 2977 * as an IO error, so implementations can assume the stream is enabled 2978 * while reading. 2979 * 2980 * Returns: The number of bytes copied or a negative error code on failure. 2981 */ 2982 static ssize_t i915_perf_read(struct file *file, 2983 char __user *buf, 2984 size_t count, 2985 loff_t *ppos) 2986 { 2987 STUB(); 2988 return -ENOSYS; 2989 #ifdef notyet 2990 struct i915_perf_stream *stream = file->private_data; 2991 struct i915_perf *perf = stream->perf; 2992 size_t offset = 0; 2993 int ret; 2994 2995 /* To ensure it's handled consistently, we simply treat all reads of a 2996 * disabled stream as an error. In particular it might otherwise lead 2997 * to a deadlock for blocking file descriptors... 2998 */ 2999 if (!stream->enabled) 3000 return -EIO; 3001 3002 if (!(file->f_flags & O_NONBLOCK)) { 3003 /* There's the small chance of false positives from 3004 * stream->ops->wait_unlocked. 3005 * 3006 * E.g. with single context filtering, since we only wait until 3007 * the oabuffer has >= 1 report, we don't immediately know whether 3008 * any reports really belong to the current context 3009 */ 3010 do { 3011 ret = stream->ops->wait_unlocked(stream); 3012 if (ret) 3013 return ret; 3014 3015 mutex_lock(&perf->lock); 3016 ret = stream->ops->read(stream, buf, count, &offset); 3017 mutex_unlock(&perf->lock); 3018 } while (!offset && !ret); 3019 } else { 3020 mutex_lock(&perf->lock); 3021 ret = stream->ops->read(stream, buf, count, &offset); 3022 mutex_unlock(&perf->lock); 3023 } 3024 3025 /* We allow the poll checking to sometimes report false positive EPOLLIN 3026 * events where we might actually report EAGAIN on read() if there's 3027 * not really any data available. In this situation though we don't 3028 * want to enter a busy loop between poll() reporting an EPOLLIN event 3029 * and read() returning -EAGAIN. Clearing the oa.pollin state here 3030 * effectively ensures we back off until the next hrtimer callback 3031 * before reporting another EPOLLIN event. 3032 * The exception to this is if ops->read() returned -ENOSPC, which means 3033 * that more OA data is available than could fit in the user provided 3034 * buffer. In this case we want the next poll() call to not block. 3035 */ 3036 if (ret != -ENOSPC) 3037 stream->pollin = false; 3038 3039 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ...
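 *
 * The chained ?: expressions below report bytes copied if there are
 * any, otherwise the error code, otherwise -EAGAIN for an empty
 * non-blocking read.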
*/ 3039 return offset ?: (ret ?: -EAGAIN); 3040 #endif 3041 } 3042 3043 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 3044 { 3045 STUB(); 3046 return 0; 3047 #ifdef notyet 3048 struct i915_perf_stream *stream = 3049 container_of(hrtimer, typeof(*stream), poll_check_timer); 3050 3051 if (oa_buffer_check_unlocked(stream)) { 3052 stream->pollin = true; 3053 wake_up(&stream->poll_wq); 3054 } 3055 3056 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); 3057 3058 return HRTIMER_RESTART; 3059 #endif 3060 } 3061 3062 #ifdef notyet 3063 3064 /** 3065 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 3066 * @stream: An i915 perf stream 3067 * @file: An i915 perf stream file 3068 * @wait: poll() state table 3069 * 3070 * For handling userspace polling on an i915 perf stream, this calls through to 3071 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 3072 * will be woken for new stream data. 3073 * 3074 * Note: The &perf->lock mutex has been taken to serialize 3075 * with any non-file-operation driver hooks. 3076 * 3077 * Returns: any poll events that are ready without sleeping 3078 */ 3079 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, 3080 struct file *file, 3081 poll_table *wait) 3082 { 3083 __poll_t events = 0; 3084 3085 stream->ops->poll_wait(stream, file, wait); 3086 3087 /* Note: we don't explicitly check whether there's something to read 3088 * here since this path may be very hot depending on what else 3089 * userspace is polling, or on the timeout in use. We rely solely on 3090 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 3091 * samples to read. 3092 */ 3093 if (stream->pollin) 3094 events |= EPOLLIN; 3095 3096 return events; 3097 } 3098 3099 /** 3100 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 3101 * @file: An i915 perf stream file 3102 * @wait: poll() state table 3103 * 3104 * For handling userspace polling on an i915 perf stream, this ensures 3105 * poll_wait() gets called with a wait queue that will be woken for new stream 3106 * data. 3107 * 3108 * Note: Implementation deferred to i915_perf_poll_locked() 3109 * 3110 * Returns: any poll events that are ready without sleeping 3111 */ 3112 static __poll_t i915_perf_poll(struct file *file, poll_table *wait) 3113 { 3114 struct i915_perf_stream *stream = file->private_data; 3115 struct i915_perf *perf = stream->perf; 3116 __poll_t ret; 3117 3118 mutex_lock(&perf->lock); 3119 ret = i915_perf_poll_locked(stream, file, wait); 3120 mutex_unlock(&perf->lock); 3121 3122 return ret; 3123 } 3124 3125 #endif /* notyet */ 3126 3127 /** 3128 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 3129 * @stream: A disabled i915 perf stream 3130 * 3131 * [Re]enables the associated capture of data for this stream. 3132 * 3133 * If a stream was previously enabled then there's currently no intention 3134 * to provide userspace any guarantee about the preservation of previously 3135 * buffered data. 
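 *
 * A minimal userspace sketch of driving a stream (a hypothetical
 * example; error handling omitted, stream_fd is a descriptor returned
 * by DRM_IOCTL_I915_PERF_OPEN):
 *
 *   ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *   nread = read(stream_fd, buf, sizeof(buf));
 *   (buf now holds drm_i915_perf_record_header-framed records)
 *   ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);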
3136 */ 3137 static void i915_perf_enable_locked(struct i915_perf_stream *stream) 3138 { 3139 if (stream->enabled) 3140 return; 3141 3142 /* Allow stream->ops->enable() to refer to this */ 3143 stream->enabled = true; 3144 3145 if (stream->ops->enable) 3146 stream->ops->enable(stream); 3147 3148 if (stream->hold_preemption) 3149 intel_context_set_nopreempt(stream->pinned_ctx); 3150 } 3151 3152 /** 3153 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl 3154 * @stream: An enabled i915 perf stream 3155 * 3156 * Disables the associated capture of data for this stream. 3157 * 3158 * The intention is that disabling and re-enabling a stream will ideally be 3159 * cheaper than destroying and re-opening a stream with the same configuration, 3160 * though there are no formal guarantees about what state or buffered data 3161 * must be retained between disabling and re-enabling a stream. 3162 * 3163 * Note: while a stream is disabled it's considered an error for userspace 3164 * to attempt to read from the stream (-EIO). 3165 */ 3166 static void i915_perf_disable_locked(struct i915_perf_stream *stream) 3167 { 3168 if (!stream->enabled) 3169 return; 3170 3171 /* Allow stream->ops->disable() to refer to this */ 3172 stream->enabled = false; 3173 3174 if (stream->hold_preemption) 3175 intel_context_clear_nopreempt(stream->pinned_ctx); 3176 3177 if (stream->ops->disable) 3178 stream->ops->disable(stream); 3179 } 3180 3181 static long i915_perf_config_locked(struct i915_perf_stream *stream, 3182 unsigned long metrics_set) 3183 { 3184 struct i915_oa_config *config; 3185 long ret = stream->oa_config->id; 3186 3187 config = i915_perf_get_oa_config(stream->perf, metrics_set); 3188 if (!config) 3189 return -EINVAL; 3190 3191 if (config != stream->oa_config) { 3192 struct i915_request *rq; 3193 3194 /* 3195 * If OA is bound to a specific context, emit the 3196 * reconfiguration inline from that context. The update 3197 * will then be ordered with respect to submission on that 3198 * context. 3199 * 3200 * When set globally, we use a low priority kernel context, 3201 * so it will effectively take effect when idle. 3202 */ 3203 rq = emit_oa_config(stream, config, oa_context(stream)); 3204 if (!IS_ERR(rq)) { 3205 config = xchg(&stream->oa_config, config); 3206 i915_request_put(rq); 3207 } else { 3208 ret = PTR_ERR(rq); 3209 } 3210 } 3211 3212 i915_oa_config_put(config); 3213 3214 return ret; 3215 } 3216 3217 /** 3218 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs 3219 * @stream: An i915 perf stream 3220 * @cmd: the ioctl request 3221 * @arg: the ioctl data 3222 * 3223 * Note: The &perf->lock mutex has been taken to serialize 3224 * with any non-file-operation driver hooks. 3225 * 3226 * Returns: zero on success or a negative error code. Returns -EINVAL for 3227 * an unknown ioctl request.
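 *
 * For example, userspace can switch the active metric set on a running
 * stream with (a sketch; config_id being a metric set id advertised
 * under the sysfs metrics/ directory):
 *
 *   ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);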
3228 */ 3229 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 3230 unsigned int cmd, 3231 unsigned long arg) 3232 { 3233 switch (cmd) { 3234 case I915_PERF_IOCTL_ENABLE: 3235 i915_perf_enable_locked(stream); 3236 return 0; 3237 case I915_PERF_IOCTL_DISABLE: 3238 i915_perf_disable_locked(stream); 3239 return 0; 3240 case I915_PERF_IOCTL_CONFIG: 3241 return i915_perf_config_locked(stream, arg); 3242 } 3243 3244 return -EINVAL; 3245 } 3246 3247 /** 3248 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 3249 * @file: An i915 perf stream file 3250 * @cmd: the ioctl request 3251 * @arg: the ioctl data 3252 * 3253 * Implementation deferred to i915_perf_ioctl_locked(). 3254 * 3255 * Returns: zero on success or a negative error code. Returns -EINVAL for 3256 * an unknown ioctl request. 3257 */ 3258 static long i915_perf_ioctl(struct file *file, 3259 unsigned int cmd, 3260 unsigned long arg) 3261 { 3262 STUB(); 3263 return -ENOSYS; 3264 #ifdef notyet 3265 struct i915_perf_stream *stream = file->private_data; 3266 struct i915_perf *perf = stream->perf; 3267 long ret; 3268 3269 mutex_lock(&perf->lock); 3270 ret = i915_perf_ioctl_locked(stream, cmd, arg); 3271 mutex_unlock(&perf->lock); 3272 3273 return ret; 3274 #endif 3275 } 3276 3277 /** 3278 * i915_perf_destroy_locked - destroy an i915 perf stream 3279 * @stream: An i915 perf stream 3280 * 3281 * Frees all resources associated with the given i915 perf @stream, disabling 3282 * any associated data capture in the process. 3283 * 3284 * Note: The &perf->lock mutex has been taken to serialize 3285 * with any non-file-operation driver hooks. 3286 */ 3287 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 3288 { 3289 if (stream->enabled) 3290 i915_perf_disable_locked(stream); 3291 3292 if (stream->ops->destroy) 3293 stream->ops->destroy(stream); 3294 3295 if (stream->ctx) 3296 i915_gem_context_put(stream->ctx); 3297 3298 kfree(stream); 3299 } 3300 3301 #ifdef notyet 3302 3303 /** 3304 * i915_perf_release - handles userspace close() of a stream file 3305 * @inode: anonymous inode associated with file 3306 * @file: An i915 perf stream file 3307 * 3308 * Cleans up any resources associated with an open i915 perf stream file. 3309 * 3310 * NB: close() can't really fail from the userspace point of view. 3311 * 3312 * Returns: zero on success or a negative error code. 3313 */ 3314 static int i915_perf_release(struct inode *inode, struct file *file) 3315 { 3316 struct i915_perf_stream *stream = file->private_data; 3317 struct i915_perf *perf = stream->perf; 3318 3319 mutex_lock(&perf->lock); 3320 i915_perf_destroy_locked(stream); 3321 mutex_unlock(&perf->lock); 3322 3323 /* Release the reference the perf stream kept on the driver. */ 3324 drm_dev_put(&perf->i915->drm); 3325 3326 return 0; 3327 } 3328 3329 3330 static const struct file_operations fops = { 3331 .owner = THIS_MODULE, 3332 .llseek = no_llseek, 3333 .release = i915_perf_release, 3334 .poll = i915_perf_poll, 3335 .read = i915_perf_read, 3336 .unlocked_ioctl = i915_perf_ioctl, 3337 /* Our ioctls have no arguments, so it's safe to use the same function 3338 * to handle 32-bit compatibility.
	 */
	.compat_ioctl   = i915_perf_ioctl,
};

#endif /* notyet */

/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @perf: i915 perf instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_open_ioctl() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: zero on success or a negative error code.
 */
static int
i915_perf_open_ioctl_locked(struct i915_perf *perf,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (!specific_ctx) {
			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
				  ctx_handle);
			ret = -ENOENT;
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8->11 the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 *
	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
	 * per context basis. So we can relax requirements there if the user
	 * doesn't request global stream access (i.e. query based sampling
	 * using MI_REPORT_PERF_COUNT).
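	 *
	 * Summarizing the checks below (a gloss, not an exhaustive rule):
	 * HSW with a specific context is unprivileged; Gen12 with a specific
	 * context and no SAMPLE_OA report requested is unprivileged; anything
	 * else requires CAP_SYS_ADMIN unless dev.i915.perf_stream_paranoid
	 * is 0, and hold_preemption forces the privileged path regardless.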
	 */
	if (IS_HASWELL(perf->i915) && specific_ctx)
		privileged_op = false;
	else if (IS_GEN(perf->i915, 12) && specific_ctx &&
		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
		privileged_op = false;

	if (props->hold_preemption) {
		if (!props->single_context) {
			DRM_DEBUG("preemption disable with no context\n");
			ret = -EINVAL;
			goto err;
		}
		privileged_op = true;
	}

	/* Similar to perf's kernel.perf_event_paranoid sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->perf = perf;
	stream->ctx = specific_ctx;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_flags;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	/* Take a reference on the driver that will be kept with stream_fd
	 * until its release.
	 */
	drm_dev_get(&perf->i915->drm);

	return stream_fd;

err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
#endif
}

static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
	return div64_u64(1000000000ULL * (2ULL << exponent),
			 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
}

/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @perf: i915 perf instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
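 *
 * For reference, a userspace sketch (hypothetical IDs and exponent) of the
 * flat (key, value) layout this parses:
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA,      1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,      I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT,    oa_exponent,
 *	};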
 */
static int read_properties_unlocked(struct i915_perf *perf,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	u64 __user *uprop = uprops;
	u32 i;

	memset(props, 0, sizeof(struct perf_open_properties));

	if (!n_props) {
		DRM_DEBUG("No i915 perf properties given\n");
		return -EINVAL;
	}

	/* At the moment we only support using i915-perf on the RCS. */
	props->engine = intel_engine_lookup_user(perf->i915,
						 I915_ENGINE_CLASS_RENDER,
						 0);
	if (!props->engine) {
		DRM_DEBUG("No RENDER-capable engines\n");
		return -EINVAL;
	}

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (n_props >= DRM_I915_PERF_PROP_MAX) {
		DRM_DEBUG("More i915 perf properties specified than exist\n");
		return -EINVAL;
	}

	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;
		int ret;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			DRM_DEBUG("Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			if (value)
				props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				DRM_DEBUG("Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				DRM_DEBUG("Out-of-range OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			if (!perf->oa_formats[value].size) {
				DRM_DEBUG("Unsupported OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				DRM_DEBUG("OA timer exponent too high (> %u)\n",
					  OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(perf, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
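			 *
			 * Worked example (assuming HSW's 12.5MHz timestamp,
			 * i.e. an 80ns tick): exponent 0 gives a
			 * (2 << 0) * 80ns = 160ns period, i.e. 6.25MHz,
			 * far above the 100000Hz default cap, while periods
			 * longer than a second yield a computed rate of 0Hz
			 * that is never limited.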
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;
				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else
				oa_freq_hz = 0;

			if (oa_freq_hz > i915_oa_max_sample_rate &&
			    !capable(CAP_SYS_ADMIN)) {
				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
					  i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
			props->hold_preemption = !!value;
			break;
		case DRM_I915_PERF_PROP_MAX:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}

	return 0;
}

/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_sem.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &perf->lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 Perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_open_param *param = data;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!perf->i915) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(perf,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	mutex_lock(&perf->lock);
	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
	mutex_unlock(&perf->lock);

	return ret;
}

/**
 * i915_perf_register - exposes i915-perf to userspace
 * @i915: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *i915)
{
#ifdef __linux__
	struct i915_perf *perf = &i915->perf;
	int ret;

	if (!perf->i915)
		return;

	/* To be sure we're synchronized with an attempted
	 * i915_perf_open_ioctl(); considering that we register after
	 * being exposed to userspace.
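	 *
	 * (The metric sets published below then appear in sysfs under
	 * /sys/class/drm/card<N>/metrics/<uuid>/id, assuming the standard
	 * DRM primary node naming.)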
	 */
	mutex_lock(&perf->lock);

	perf->metrics_kobj =
		kobject_create_and_add("metrics",
				       &i915->drm.primary->kdev->kobj);
	if (!perf->metrics_kobj)
		goto exit;

	sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);

	if (IS_TIGERLAKE(i915)) {
		i915_perf_load_test_config_tgl(i915);
	} else if (INTEL_GEN(i915) >= 11) {
		i915_perf_load_test_config_icl(i915);
	} else if (IS_CANNONLAKE(i915)) {
		i915_perf_load_test_config_cnl(i915);
	} else if (IS_COFFEELAKE(i915)) {
		if (IS_CFL_GT2(i915))
			i915_perf_load_test_config_cflgt2(i915);
		if (IS_CFL_GT3(i915))
			i915_perf_load_test_config_cflgt3(i915);
	} else if (IS_GEMINILAKE(i915)) {
		i915_perf_load_test_config_glk(i915);
	} else if (IS_KABYLAKE(i915)) {
		if (IS_KBL_GT2(i915))
			i915_perf_load_test_config_kblgt2(i915);
		else if (IS_KBL_GT3(i915))
			i915_perf_load_test_config_kblgt3(i915);
	} else if (IS_BROXTON(i915)) {
		i915_perf_load_test_config_bxt(i915);
	} else if (IS_SKYLAKE(i915)) {
		if (IS_SKL_GT2(i915))
			i915_perf_load_test_config_sklgt2(i915);
		else if (IS_SKL_GT3(i915))
			i915_perf_load_test_config_sklgt3(i915);
		else if (IS_SKL_GT4(i915))
			i915_perf_load_test_config_sklgt4(i915);
	} else if (IS_CHERRYVIEW(i915)) {
		i915_perf_load_test_config_chv(i915);
	} else if (IS_BROADWELL(i915)) {
		i915_perf_load_test_config_bdw(i915);
	} else if (IS_HASWELL(i915)) {
		i915_perf_load_test_config_hsw(i915);
	}

	if (perf->test_config.id == 0)
		goto sysfs_error;

	ret = sysfs_create_group(perf->metrics_kobj,
				 &perf->test_config.sysfs_metric);
	if (ret)
		goto sysfs_error;

	perf->test_config.perf = perf;
	kref_init(&perf->test_config.ref);

	goto exit;

sysfs_error:
	kobject_put(perf->metrics_kobj);
	perf->metrics_kobj = NULL;

exit:
	mutex_unlock(&perf->lock);
#endif
}

/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @i915: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
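 *
 * Once the metrics/ kobject is removed, i915_perf_add_config_ioctl()
 * rejects new configs with -EINVAL since the metrics are no longer
 * advertised via sysfs, while streams that are already open continue to
 * work until closed.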
 */
void i915_perf_unregister(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->metrics_kobj)
		return;

	sysfs_remove_group(perf->metrics_kobj,
			   &perf->test_config.sysfs_metric);

	kobject_put(perf->metrics_kobj);
	perf->metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

#define ADDR_IN_RANGE(addr, start, end) \
	((addr) >= (start) && \
	 (addr) <= (end))

#define REG_IN_RANGE(addr, start, end) \
	((addr) >= i915_mmio_reg_offset(start) && \
	 (addr) <= i915_mmio_reg_offset(end))

#define REG_EQUAL(addr, mmio) \
	((addr) == i915_mmio_reg_offset(mmio))

static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
	       REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
	       REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
}

static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
	       REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
	       REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
	       REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
}

static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen7_is_valid_mux_addr(perf, addr) ||
	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
	       REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
}

static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen8_is_valid_mux_addr(perf, addr) ||
	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
	       REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
}

static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen7_is_valid_mux_addr(perf, addr) ||
	       ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
	       REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
	       REG_EQUAL(addr, HSW_MBVID2_MISR0);
}

static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return gen7_is_valid_mux_addr(perf, addr) ||
	       ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
}

static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
	       REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
	       REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
	       REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
	       REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
	       REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
	       REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
}

static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return REG_EQUAL(addr, NOA_WRITE) ||
	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
	       REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
	       REG_EQUAL(addr, RPM_CONFIG0) ||
	       REG_EQUAL(addr, RPM_CONFIG1) ||
	       REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
}

static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(perf, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}

static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
#endif
}

static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
					 struct i915_oa_config *oa_config)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(perf->metrics_kobj,
				  &oa_config->sysfs_metric);
#endif
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
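 *
 * A rough userspace sketch (hypothetical register lists and uuid; drmIoctl()
 * is libdrm's) of the expected argument:
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-89ab-cdef-0123-456789abcdef",
 *		.n_mux_regs = n_mux,
 *		.mux_regs_ptr = (uintptr_t)mux_pairs, // u32 (addr, value) pairs
 *		.n_boolean_regs = n_bool,
 *		.boolean_regs_ptr = (uintptr_t)bool_pairs,
 *	};
 *	int id = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);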
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	struct i915_oa_reg *regs;
	int err, id;

	if (!perf->i915) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!perf->metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	oa_config->perf = perf;
	kref_init(&oa_config->ref);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config was
	 * kzalloc'd.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_mux_reg,
			     u64_to_user_ptr(args->mux_regs_ptr),
			     args->n_mux_regs);

	if (IS_ERR(regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->mux_regs = regs;

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_b_counter_reg,
			     u64_to_user_ptr(args->boolean_regs_ptr),
			     args->n_boolean_regs);

	if (IS_ERR(regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->b_counter_regs = regs;

	if (INTEL_GEN(perf->i915) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		regs = alloc_oa_regs(perf,
				     perf->ops.is_valid_flex_reg,
				     u64_to_user_ptr(args->flex_regs_ptr),
				     args->n_flex_regs);

		if (IS_ERR(regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(regs);
			goto reg_err;
		}
		oa_config->flex_regs = regs;
	}

	err = mutex_lock_interruptible(&perf->metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
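	 * It is a linear scan over the registered configs purely to enforce
	 * uuid uniqueness; a duplicate is rejected below with -EADDRINUSE
	 * rather than replacing the existing entry.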
	 */
	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 for kernel stored test config. */
	oa_config->id = idr_alloc(&perf->metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate an ID for OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&perf->metrics_lock);

	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&perf->metrics_lock);
reg_err:
	i915_oa_config_put(oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
#endif
}

/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!perf->i915) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&perf->metrics_lock);
	if (ret)
		return ret;

	oa_config = idr_find(&perf->metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto err_unlock;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);

	idr_remove(&perf->metrics_idr, *arg);

	mutex_unlock(&perf->metrics_lock);

	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	i915_oa_config_put(oa_config);

	return 0;

err_unlock:
	mutex_unlock(&perf->metrics_lock);
	return ret;
}

#ifdef notyet
static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = SYSCTL_ONE,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};

static struct ctl_table i915_root[] = {
	{
	 .procname = "i915",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = oa_table,
	},
	{}
};

static struct ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = i915_root,
	},
	{}
};
#endif

/**
 * i915_perf_init - initialize i915-perf state on module bind
 * @i915: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	/* XXX const struct i915_perf_ops! */

	if (IS_HASWELL(i915)) {
		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
		perf->ops.is_valid_flex_reg = NULL;
		perf->ops.enable_metric_set = hsw_enable_metric_set;
		perf->ops.disable_metric_set = hsw_disable_metric_set;
		perf->ops.oa_enable = gen7_oa_enable;
		perf->ops.oa_disable = gen7_oa_disable;
		perf->ops.read = gen7_oa_read;
		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;

		perf->oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		perf->ops.read = gen8_oa_read;

		if (IS_GEN_RANGE(i915, 8, 9)) {
			perf->oa_formats = gen8_plus_oa_formats;

			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(i915)) {
				perf->ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen8_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

			if (IS_GEN(i915, 8)) {
				perf->ctx_oactxctrl_offset = 0x120;
				perf->ctx_flexeu0_offset = 0x2ce;

				perf->gen8_valid_ctx_bit = BIT(25);
			} else {
				perf->ctx_oactxctrl_offset = 0x128;
				perf->ctx_flexeu0_offset = 0x3de;

				perf->gen8_valid_ctx_bit = BIT(16);
			}
		} else if (IS_GEN_RANGE(i915, 10, 11)) {
			perf->oa_formats = gen8_plus_oa_formats;

			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen10_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

			if (IS_GEN(i915, 10)) {
				perf->ctx_oactxctrl_offset = 0x128;
				perf->ctx_flexeu0_offset = 0x3de;
			} else {
				perf->ctx_oactxctrl_offset = 0x124;
				perf->ctx_flexeu0_offset = 0x78e;
			}
			perf->gen8_valid_ctx_bit = BIT(16);
		} else if (IS_GEN(i915, 12)) {
			perf->oa_formats = gen12_oa_formats;

			perf->ops.is_valid_b_counter_reg =
				gen12_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen12_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen12_oa_enable;
			perf->ops.oa_disable = gen12_oa_disable;
			perf->ops.enable_metric_set = gen12_enable_metric_set;
			perf->ops.disable_metric_set = gen12_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;

			perf->ctx_flexeu0_offset = 0;
			perf->ctx_oactxctrl_offset = 0x144;
		}
	}

	if (perf->ops.enable_metric_set) {
		rw_init(&perf->lock, "perflk");

		oa_sample_rate_hard_limit = 1000 *
			(RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);

		rw_init(&perf->metrics_lock, "metricslk");
		idr_init(&perf->metrics_idr);

		/* We set up some ratelimit state to potentially throttle any
		 * _NOTES about spurious, invalid OA reports which we don't
		 * forward to userspace.
		 *
		 * We print a _NOTE about any throttling when closing the
		 * stream instead of waiting until driver _fini which no one
		 * would ever see.
		 *
		 * Using the same limiting factors as printk_ratelimit()
		 */
#ifdef notyet
		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
		/* Since we use a DRM_NOTE for spurious reports it would be
		 * inconsistent to let __ratelimit() automatically print a
		 * warning for throttling.
		 */
		ratelimit_set_flags(&perf->spurious_report_rs,
				    RATELIMIT_MSG_ON_RELEASE);
#endif

		atomic64_set(&perf->noa_programming_delay,
			     500 * 1000 /* 500us */);

		perf->i915 = i915;
	}
}

static int destroy_config(int id, void *p, void *data)
{
	i915_oa_config_put(p);
	return 0;
}

void i915_perf_sysctl_register(void)
{
#ifdef notyet
	sysctl_header = register_sysctl_table(dev_root);
#endif
}

void i915_perf_sysctl_unregister(void)
{
#ifdef notyet
	unregister_sysctl_table(sysctl_header);
#endif
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @i915: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->i915)
		return;

	idr_for_each(&perf->metrics_idr, destroy_config, perf);
	idr_destroy(&perf->metrics_idr);

	memset(&perf->ops, 0, sizeof(perf->ops));
	perf->i915 = NULL;
}

/**
 * i915_perf_ioctl_version - Version of the i915-perf subsystem
 *
 * This version number is used by userspace to detect available features.
 */
int i915_perf_ioctl_version(void)
{
	/*
	 * 1: Initial version
	 *   I915_PERF_IOCTL_ENABLE
	 *   I915_PERF_IOCTL_DISABLE
	 *
	 * 2: Added runtime modification of OA config.
	 *   I915_PERF_IOCTL_CONFIG
	 *
	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
	 *    preemption on a particular context so that performance data is
	 *    accessible from a delta of MI_RPC reports without looking at the
	 *    OA buffer.
	 */
	return 3;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_perf.c"
#endif
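/*
 * A userspace sketch of feature detection against the version above
 * (Linux; assumes libdrm's drmIoctl() and the I915_PARAM_PERF_REVISION
 * getparam exposed alongside this versioning):
 *
 *	int revision = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &revision,
 *	};
 *
 *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 &&
 *	    revision >= 3) {
 *		// DRM_I915_PERF_PROP_HOLD_PREEMPTION is available
 *	}
 */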