| /openbsd-src/sys/dev/pci/drm/i915/gt/ |
| H A D | intel_ring_submission.c |
    35  static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
    41  if (engine->class == RENDER_CLASS) {
    42  if (GRAPHICS_VER(engine->i915) >= 6)
    48  intel_engine_set_hwsp_writemask(engine, mask);
    51  static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
    56  if (GRAPHICS_VER(engine->i915) >= 4)
    59  intel_uncore_write(engine->uncore, HWS_PGA, addr);
    62  static struct vm_page *status_page(struct intel_engine_cs *engine)
    64  struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
    70  static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
    [all …]
|
| H A D | intel_engine_heartbeat.c |
    23  static bool next_heartbeat(struct intel_engine_cs *engine)
    28  delay = READ_ONCE(engine->props.heartbeat_interval_ms);
    30  rq = engine->heartbeat.systole;
    42  delay == engine->defaults.heartbeat_interval_ms) {
    50  longer = READ_ONCE(engine->props.preempt_timeout_ms) * 2;
    51  longer = intel_clamp_heartbeat_interval_ms(engine, longer);
    62  mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay + 1);
    79  static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
    81  engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
    83  if (!engine->heartbeat.systole && intel_engine_has_heartbeat(engine))
    [all …]
|
| H A D | selftest_engine_heartbeat.c |
    14  static void reset_heartbeat(struct intel_engine_cs *engine)
    16  intel_engine_set_heartbeat(engine,
    17  engine->defaults.heartbeat_interval_ms);
    37  static int engine_sync_barrier(struct intel_engine_cs *engine)
    39  return timeline_sync(engine->kernel_context->timeline);
    90  static int __live_idle_pulse(struct intel_engine_cs *engine,
    96  GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
    106  err = i915_active_acquire_preallocate_barrier(&p->active, engine);
    116  GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
    118  err = fn(engine);
    [all …]
|
| H A D | mock_engine.c |
    60  static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
    75  ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
    93  static struct i915_request *first_request(struct mock_engine *engine)
    95  return list_first_entry_or_null(&engine->hw_queue,
    106  intel_engine_signal_breadcrumbs(request->engine);  (in advance)
    111  struct mock_engine *engine = from_timer(engine, t, hw_delay);  (in hw_delay_complete)
    115  spin_lock_irqsave(&engine->hw_lock, flags);
    118  request = first_request(engine);
    126  while ((request = first_request(engine))) {
    128  mod_timer(&engine->hw_delay,
    [all …]
|
| H A D | intel_engine_cs.c |
    260  * intel_engine_context_size() - return the size of the context for an engine
    262  * @class: engine class
    264  * Each engine class may require a different amount of space for a context
    267  * Return: size (in bytes) of an engine class specific context image
    359  static void __sprint_engine_name(struct intel_engine_cs *engine)
    362  * Before we know what the uABI name for this engine will be,
    363  * we still would like to keep track of this engine in the debug logs.
    366  GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
    367  intel_engine_class_repr(engine …
    Further matches (by line): 371 intel_engine_set_hwsp_writemask(), 386 intel_engine_sanitize_mmio(),
    392 nop_irq_handler(), 455 intel_engine_setup(), 573 intel_clamp_heartbeat_interval_ms(),
    580 intel_clamp_max_busywait_duration_ns(), 587 intel_clamp_preempt_timeout_ms(),
    601 intel_clamp_stop_timeout_ms(), 608 intel_clamp_timeslice_duration_ms(),
    622 __setup_engine_capabilities(), 656 intel_setup_engine_capabilities(), 669 intel_engines_release(),
    700 intel_engine_free_request_pool(), 714 intel_engines_free(), 1052 intel_engine_init_execlists(),
    1065 cleanup_status_page(), 1083 pin_ggtt_status_page(), 1108 init_status_page(),
    1173 intel_engine_init_tlb_invalidation(), 1287 engine_setup_common(), 1352 measure_breadcrumb_dw(),
    1391 intel_engine_create_pinned_context(), 1434 intel_engine_destroy_pinned_context(),
    1449 create_kernel_context(), 1469 engine_init_common(), 1505 intel_engines_init(),
    1551 intel_engine_cleanup_common(), 1581 intel_engine_resume(), 1589 intel_engine_get_active_head(),
    1605 intel_engine_get_last_batch_head(), 1617 stop_timeout(), 1632 __intel_engine_stop_cs(),
    1661 intel_engine_stop_cs(), 1700 intel_engine_cancel_stop_cs(), 1707 __cs_pending_mi_force_wakes(),
    1768 intel_engine_wait_for_pending_mi_fw(), 1777 intel_engine_get_instdone(), 1848 ring_is_idle(),
    1873 __intel_engine_flush_submission(), 1901 intel_engine_is_idle(), 1924 intel_engines_are_idle(),
    1946 intel_engine_irq_enable(), 1959 intel_engine_irq_disable(),
    1972 intel_engines_reset_default_submission(), 1983 intel_engine_can_store_dword(),
    2086 intel_engine_print_registers(), 2266 print_properties(), 2352 engine_dump_active_requests(),
    2383 intel_engine_dump(), 2454 intel_engine_get_busy_time(), 2473 engine_execlist_find_hung_request(),
    2525 intel_engine_get_hung_entity(), 2552 xehp_enable_ccs_engines()
    [all …]
| H A D | intel_execlists_submission.c |
    24  * shouldn't we just need a set of those per engine command streamer? This is
    26  * rings, the engine cs shifts to a new "ring buffer" with every context
    41  * Now that ringbuffers belong per-context (and not per-engine, like before)
    42  * and that contexts are uniquely tied to a given engine (and not reusable,
    45  * - One ringbuffer per-engine inside each context.
    46  * - One backing object per-engine inside each context.
    50  * more complex, because we don't know at creation time which engine is going
    55  * gets populated for a given engine once we receive an execbuffer. If later
    57  * engine, we allocate/populate a new ringbuffer and context backing object and
    74  * for the appropriate engine
    Further matches (by line): 201 to_virtual_engine(), 238 ring_set_paused(), 297 need_preempt(),
    367 __unwind_incomplete_requests(), 411 execlists_unwind_incomplete_requests(), 435 reset_active(),
    480 __execlists_schedule_in(), 552 resubmit_virtual_request(), 566 kick_siblings(),
    595 __execlists_schedule_out(), 763 trace_ports(), 776 reset_in_progress(), 785 assert_pending_valid(),
    911 execlists_submit_ports(), 1001 virtual_matches(), 1028 first_virtual_engine(),
    1053 virtual_xfer_context(), 1126 defer_active(), 1157 needs_timeslice(), 1193 timeslice_expired(),
    1206 timeslice(), 1211 start_timeslice(), 1242 active_preempt_timeout(), 1258 set_preempt_timeout(),
    1276 execlists_dequeue(), 1623 execlists_dequeue_irq(), 1755 wa_csb_read(), 1788 csb_read(),
    1820 process_csb(), 2102 execlists_hold(), 2200 execlists_unhold(), 2230 execlists_capture_work(),
    2258 capture_regs(), 2293 active_context(), 2326 active_ccid(), 2331 execlists_capture(),
    2392 execlists_reset(), 2416 preempt_timeout(), 2437 execlists_submission_tasklet(),
    2492 execlists_irq_handler(), 2533 __execlists_kick(), 2555 queue_request(), 2565 submit_queue(),
    2577 ancestor_on_hold(), 2586 execlists_submit_request(), 2612 __execlists_context_pre_pin(),
    2650 execlists_context_cancel_request(), 2721 emit_pdps(), 2812 reset_csb_pointers(),
    2854 sanitize_hwsp(), 2862 execlists_sanitize(), 2893 enable_error_interrupt(), 2936 enable_execlists(),
    2960 execlists_resume(), 2973 execlists_reset_prepare(), 3016 reset_csb(), 3032 execlists_reset_active(),
    3109 execlists_reset_csb(), 3125 execlists_reset_rewind(), 3146 nop_submission_tasklet(),
    3152 execlists_reset_cancel(), 3240 execlists_reset_finish(), 3264 gen8_logical_ring_enable_irq(),
    3271 gen8_logical_ring_disable_irq(), 3276 execlists_park(), 3293 remove_from_engine(),
    3321 can_preempt(), 3332 kick_execlists(), 3380 execlists_set_default_submission(),
    3388 execlists_shutdown(), 3396 execlists_release(), 3406 __execlists_engine_busyness(),
    3423 execlists_engine_busyness(), 3439 logical_ring_default_vfuncs(), 3505 logical_ring_default_irqs(),
    3527 rcs_submission_override(), 3545 intel_execlists_submission_setup(), 3771 virtual_get_sibling(),
    4092 intel_execlists_show_requests(), 4177 intel_execlists_dump_active_requests()
    [all …]
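The comment excerpted above describes the logical-ring-context layout: each context carries one ring
buffer and one backing object per engine, populated lazily the first time an execbuffer targets that
engine. A toy sketch of that idea follows; the struct and helper names are invented for illustration
and are not the driver's real types.

    #include <stdlib.h>

    /* Invented types, illustration only -- not the i915 structures. */
    struct toy_engine_state {
            void *ring;       /* per-engine ring buffer inside the context */
            void *state_obj;  /* per-engine context backing object */
    };

    struct toy_context {
            struct toy_engine_state engines[8];  /* one slot per engine */
    };

    /* Lazily populate the per-engine ring/state on first submission to that engine. */
    static struct toy_engine_state *
    toy_context_pin(struct toy_context *ctx, unsigned int engine_id)
    {
            struct toy_engine_state *es = &ctx->engines[engine_id];

            if (!es->ring)
                    es->ring = calloc(1, 4096);      /* stand-in for the ring allocation */
            if (!es->state_obj)
                    es->state_obj = calloc(1, 4096); /* stand-in for the backing object */
            return es;
    }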
| H A D | intel_engine_pm.c |
    20  static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
    22  struct drm_i915_private *i915 = engine->i915;
    24  if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
    25  intel_uncore_write(engine->gt->uncore,
    29  intel_uncore_write(engine->gt->uncore,
    42  int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);  (in dbg_poison_ce)
    60  struct intel_engine_cs *engine =  (in __engine_unpark)
    61  container_of(wf, typeof(*engine), wakeref);
    64  ENGINE_TRACE(engine, "\n");
    66  intel_gt_pm_get(engine->gt);
    [all …]
|
| H A D | intel_engine_pm.h |
    16  intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
    18  return intel_wakeref_is_active(&engine->wakeref);
    21  static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
    23  __intel_wakeref_get(&engine->wakeref);
    26  static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
    28  intel_wakeref_get(&engine->wakeref);
    31  static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
    33  return intel_wakeref_get_if_active(&engine->wakeref);
    36  static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
    38  if (!intel_engine_is_virtual(engine)) {
    [all …]
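The helpers above are thin wrappers around the engine's wakeref. A minimal usage sketch follows,
assuming the matching intel_engine_pm_put() release from the same header (not shown in the excerpt);
error handling and locking are elided.

    /* Sketch: touch engine state only while it is already awake. */
    static void poke_engine_if_awake(struct intel_engine_cs *engine)
    {
            if (!intel_engine_pm_get_if_awake(engine))
                    return;  /* engine is parked; skip the access */

            /* ... read or write engine registers while the wakeref is held ... */

            intel_engine_pm_put(engine);  /* assumed release counterpart */
    }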
|
| H A D | intel_engine.h |
    47  * ENGINE_READ(engine, REG_FOO);
    52  * ENGINE_READ_IDX(engine, REG_BAR, i)
    133  intel_read_status_page(const struct intel_engine_cs *engine, int reg)
    136  return READ_ONCE(engine->status_page.addr[reg]);
    140  intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
    147  drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
    148  WRITE_ONCE(engine->status_page.addr[reg], value);
    149  drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
    187  void intel_engine_stop(struct intel_engine_cs *engine);
    188  void intel_engine_cleanup(struct intel_engine_cs *engine);
    Further matches (by line): 221 __intel_engine_reset(), 233 intel_engine_flush_submission(),
    273 intel_engine_uses_guc(), 279 intel_engine_has_preempt_reset(),
    302 intel_virtual_engine_has_heartbeat(), 316 intel_engine_has_heartbeat(),
    328 intel_engine_get_sibling(), 335 intel_engine_set_hung_context(),
    342 intel_engine_clear_hung_context(), 348 intel_engine_get_hung_context()
    [all …]
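intel_read_status_page() and intel_write_status_page() above access the engine's hardware status
page via READ_ONCE()/WRITE_ONCE(), flushing cache lines around writes on non-coherent platforms.
A small polling sketch follows; HWSP_EXAMPLE_INDEX is a made-up dword index for illustration (real
callers use the I915_GEM_HWS_* offsets defined alongside these helpers).

    #define HWSP_EXAMPLE_INDEX 0x30  /* hypothetical status-page slot, illustration only */

    /* Sketch: spin until a status-page dword reaches the expected value. */
    static bool wait_for_hwsp_value(struct intel_engine_cs *engine,
                                    u32 expected, unsigned int tries)
    {
            while (tries--) {
                    if (intel_read_status_page(engine, HWSP_EXAMPLE_INDEX) == expected)
                            return true;
                    cpu_relax();
            }
            return false;
    }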
| H A D | intel_engine_user.c |
    39  void intel_engine_add_user(struct intel_engine_cs *engine)
    41  llist_add((struct llist_node *)&engine->uabi_node,
    42  (struct llist_head *)&engine->i915->uabi_engines);
    88  struct intel_engine_cs *engine =  (in sort_engines)
    89  container_of((struct rb_node *)pos, typeof(*engine),
    91  list_add((struct list_head *)&engine->uabi_node, engines);
    100  u8 engine;  (member, in set_scheduler_caps)
    109  struct intel_engine_cs *engine;  (in set_scheduler_caps)
    114  for_each_uabi_engine(engine, i915) { /* all engines must agree! */
    117  if (engine …
    Further matches (by line): 144 set_scheduler_caps(), 153 set_scheduler_caps(), 230 add_legacy_ring(),
    243 engine_rename(), 265 intel_engines_driver_register(), 311 intel_engines_driver_register(),
    366 intel_engines_has_context_isolation()
    [all …]
| H A D | selftest_hangcheck.c |
    104  hang_create_request(struct hang *h, struct intel_engine_cs *engine)
    157  rq = igt_request_alloc(h->ctx, engine);
    226  intel_gt_chipset_flush(engine->gt);
    228  if (rq->engine->emit_init_breadcrumb) {
    229  err = rq->engine->emit_init_breadcrumb(rq);
    238  err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
    288  struct intel_engine_cs *engine;  (in igt_hang_sanitycheck)
    299  for_each_engine(engine, gt, id) {
    303  if (!intel_engine_can_store_dword(engine))
    306  rq = hang_create_request(&h, engine);
    [all …]
|
| H A D | intel_lrc.c |
    49  const struct intel_engine_cs *engine,  (in set_offsets)
    60  const u32 base = engine->mmio_base;
    78  if (GRAPHICS_VER(engine->i915) >= 11)
    101  if (GRAPHICS_VER(engine->i915) >= 11)
    682  static const u8 *reg_offsets(const struct intel_engine_cs *engine)
    690  GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
    691  !intel_engine_has_relative_mmio(engine));
    693  if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) {
    694  if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
    696  else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
    [all …]
|
| H A D | selftest_workarounds.c |
    34  } engine[I915_NUM_ENGINES];  (member)
    64  struct intel_engine_cs *engine;  (in reference_lists_init)
    73  for_each_engine(engine, gt, id) {
    74  struct i915_wa_list *wal = &lists->engine[id].wa_list;
    76  wa_init_start(wal, gt, "REF", engine->name);
    77  engine_init_workarounds(engine, wal);
    80  __intel_engine_init_ctx_wa(engine,
    81  &lists->engine[id].ctx_wa_list,
    89  struct intel_engine_cs *engine;  (in reference_lists_fini)
    92  for_each_engine(engine, gt, id)
    [all …]
|
| H A D | intel_reset.c |
    324  struct intel_engine_cs *engine;  (in __gen6_reset_engines)
    333  for_each_engine_masked(engine, gt, engine_mask, tmp) {
    334  hw_mask |= engine->reset_domain;
    355  static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
    359  GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
    361  vecs_id = _VECS((engine->instance) / 2);
    363  return engine->gt->engine[vecs_id];
    376  static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
    379  switch (engine->class) {
    381  MISSING_CASE(engine->class);
    [all …]
|
| H A D | sysfs_engines.c |
    18  struct intel_engine_cs *engine;  (member)
    23  return container_of(kobj, struct kobj_engine, base)->engine;  (in kobj_to_engine)
    83  __caps_show(struct intel_engine_cs *engine,
    90  switch (engine->class) {
    125  struct intel_engine_cs *engine = kobj_to_engine(kobj);  (in caps_show)
    127  return __caps_show(engine, engine->uabi_capabilities, buf, true);
    146  struct intel_engine_cs *engine = kobj_to_engine(kobj);  (in max_spin_store)
    171  clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
    175  WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
    183  struct intel_engine_cs *engine = kobj_to_engine(kobj);  (in max_spin_show)
    [all …]
|
| H A D | selftest_lrc.c |
    26  #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
    52  static int wait_for_submit(struct intel_engine_cs *engine,
    57  tasklet_hi_schedule(&engine->sched_engine->tasklet);
    67  intel_engine_flush_submission(engine);
    68  if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
    81  i915_ggtt_offset(ce->engine->status_page.vma) +  (in emit_semaphore_signal)
    114  rq = intel_engine_create_kernel_request(ce->engine);  (in context_flush)
    134  static int get_lri_mask(struct intel_engine_cs *engine, u32 lri)
    139  if (GRAPHICS_VER(engine->i915) < 12)
    142  switch (engine->class) {
    [all …]
|
| H A D | selftest_execlists.c |
    24  #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
    42  static int wait_for_submit(struct intel_engine_cs *engine,
    47  tasklet_hi_schedule(&engine->sched_engine->tasklet);
    57  intel_engine_flush_submission(engine);
    58  if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
    68  static int wait_for_reset(struct intel_engine_cs *engine,
    76  intel_engine_flush_submission(engine);
    78  if (READ_ONCE(engine->execlists.pending[0]))
    90  engine->name,
    100  engine->name,
    [all …]
|
| H A D | selftest_ring_submission.c |
    9  static struct i915_vma *create_wally(struct intel_engine_cs *engine)
    16  obj = i915_gem_object_create_internal(engine->i915, 4096);
    20  vma = i915_vma_instance(obj, engine->gt->vm, NULL);
    44  if (GRAPHICS_VER(engine->i915) >= 6) {
    47  } else if (GRAPHICS_VER(engine->i915) >= 4) {
    61  vma->private = intel_context_create(engine); /* dummy residuals */
    89  static int new_context_sync(struct intel_engine_cs *engine)
    94  ce = intel_context_create(engine);
    104  static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
    111  err = context_sync(engine->kernel_context);
    [all …]
|
| H A D | selftest_rps.c |
    57  create_spin_counter(struct intel_engine_cs *engine,
    68  #define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x)
    224  struct intel_engine_cs *engine;  (in live_rps_clock_interval)
    244  for_each_engine(engine, gt, id) {
    249  if (!intel_engine_can_store_dword(engine))
    252  st_engine_heartbeat_disable(engine);
    255  engine->kernel_context,
    258  st_engine_heartbeat_enable(engine);
    267  engine->name);
    269  st_engine_heartbeat_enable(engine);
    [all …]
|
| H A D | intel_workarounds.c |
    340  static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
    346  static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
    352  static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
    400  static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
    403  struct drm_i915_private *i915 = engine->i915;
    405  gen8_ctx_workarounds_init(engine, wal);
    428  static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
    431  gen8_ctx_workarounds_init(engine, wal);
    440  static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
    443  struct drm_i915_private *i915 = engine->i915;
    [all …]
|
| H A D | intel_tlb.c |
    29  static int wait_for_invalidate(struct intel_engine_cs *engine)
    31  if (engine->tlb_inv.mcr)
    32  return intel_gt_mcr_wait_for_reg(engine->gt,
    33  engine->tlb_inv.reg.mcr_reg,
    34  engine->tlb_inv.done,
    39  return __intel_wait_for_register_fw(engine->gt->uncore,
    40  engine->tlb_inv.reg.reg,
    41  engine->tlb_inv.done,
    52  struct intel_engine_cs *engine;  (in mmio_invalidate_full)
    66  for_each_engine(engine, gt, id) {
    [all …]
|
| H A D | intel_engine_types.h |
    71  /* The following exist only in the RCS engine */
    109  * Keep instances of the same type engine together.
    147  /* A simple estimator for the round-trip latency of an engine */
    188  * @ccid: identifier for contexts submitted to this engine
    195  * Instead of leaving a semaphore busy-spinning on an engine, we would
    207  * the guilty request, and then reset the engine.
    263  * @virtual: Queue of requets on a virtual engine, sorted by priority.
    311  * @total: Total time this engine was busy.
    314  * engine is currently busy (active > 0).
    328  * @running: Active state of the engine whe …
    Further matches (by line): 641 intel_engine_using_cmd_parser(), 647 intel_engine_requires_cmd_parser(),
    653 intel_engine_supports_stats(), 659 intel_engine_has_preemption(), 665 intel_engine_has_semaphores(),
    671 intel_engine_has_timeslices(), 680 intel_engine_is_virtual(), 686 intel_engine_has_relative_mmio(),
    693 intel_engine_uses_wa_hold_ccs_switchout()
    [all …]
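The inline helpers indexed above (intel_engine_has_preemption(), intel_engine_has_semaphores(),
intel_engine_is_virtual(), ...) test feature bits kept in engine->flags. A minimal sketch of checking
one such capability across a GT, assuming the usual for_each_engine() iterator from these headers:

    /* Sketch: count how many engines on this GT advertise preemption support. */
    static unsigned int count_preemptible_engines(struct intel_gt *gt)
    {
            struct intel_engine_cs *engine;
            enum intel_engine_id id;
            unsigned int count = 0;

            for_each_engine(engine, gt, id)
                    if (intel_engine_has_preemption(engine))
                            count++;

            return count;
    }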
| H A D | gen2_engine_cs.c |
    105  *cs++ = intel_gt_scratch_offset(rq->engine->gt,  (in gen4_emit_flush_rcs)
    115  *cs++ = intel_gt_scratch_offset(rq->engine->gt,
    147  GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);  (in __gen2_emit_breadcrumb)
    191  intel_gt_scratch_offset(rq->engine->gt,  (in i830_emit_bb_start)
    194  GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
    293  void gen2_irq_enable(struct intel_engine_cs *engine)
    295  struct drm_i915_private *i915 = engine->i915;
    297  i915->irq_mask &= ~engine->irq_enable_mask;
    299  ENGINE_POSTING_READ16(engine, RING_IMR);
    302  void gen2_irq_disable(struct intel_engine_cs *engine)
    [all …]
|
| /openbsd-src/sys/dev/pci/drm/i915/selftests/ |
| H A D | intel_scheduler_helpers.c |
    19  struct intel_engine_cs *engine;  (in intel_selftest_find_any_engine)
    22  for_each_engine(engine, gt, id)
    23  return engine;
    29  int intel_selftest_modify_policy(struct intel_engine_cs *engine,
    35  saved->reset = engine->i915->params.reset;
    36  saved->flags = engine->flags;
    37  saved->timeslice = engine->props.timeslice_duration_ms;
    38  saved->preempt_timeout = engine->props.preempt_timeout_ms;
    50  engine->i915->params.reset = 2;
    51  engine->flags |= I915_ENGINE_WANT_FORCED_PREEMPTION;
    [all …]
|
| H A D | i915_request.c |
    53  struct intel_engine_cs *engine;  (in num_uabi_engines)
    57  for_each_uabi_engine(engine, i915)
    254  request->engine->submit_request(request);  (in igt_request_rewind)
    283  struct intel_engine_cs *engine;  (member)
    370  ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);  (in __igt_breadcrumbs_smoketest)
    411  t->engine->name);
    414  intel_gt_set_wedged(t->engine->gt);
    458  .engine = rcs0(i915),  (in mock_breadcrumbs_smoketest)
    485  t.contexts[n] = mock_context(t.engine->i915, "mock");
    570  struct intel_engine_cs *engine;  (in live_nop_request)
    [all …]
|