xref: /openbsd-src/sys/dev/pci/drm/i915/i915_driver.c (revision 3836e7c723d9c621d1bfafcdf2fb1c68075fdb93)
11bb76ff1Sjsg /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
21bb76ff1Sjsg  */
31bb76ff1Sjsg /*
41bb76ff1Sjsg  *
51bb76ff1Sjsg  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
61bb76ff1Sjsg  * All Rights Reserved.
71bb76ff1Sjsg  *
81bb76ff1Sjsg  * Permission is hereby granted, free of charge, to any person obtaining a
91bb76ff1Sjsg  * copy of this software and associated documentation files (the
101bb76ff1Sjsg  * "Software"), to deal in the Software without restriction, including
111bb76ff1Sjsg  * without limitation the rights to use, copy, modify, merge, publish,
121bb76ff1Sjsg  * distribute, sub license, and/or sell copies of the Software, and to
131bb76ff1Sjsg  * permit persons to whom the Software is furnished to do so, subject to
141bb76ff1Sjsg  * the following conditions:
151bb76ff1Sjsg  *
161bb76ff1Sjsg  * The above copyright notice and this permission notice (including the
171bb76ff1Sjsg  * next paragraph) shall be included in all copies or substantial portions
181bb76ff1Sjsg  * of the Software.
191bb76ff1Sjsg  *
201bb76ff1Sjsg  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
211bb76ff1Sjsg  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
221bb76ff1Sjsg  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
231bb76ff1Sjsg  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
241bb76ff1Sjsg  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
251bb76ff1Sjsg  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
261bb76ff1Sjsg  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
271bb76ff1Sjsg  *
281bb76ff1Sjsg  */
291bb76ff1Sjsg 
301bb76ff1Sjsg #include <linux/acpi.h>
311bb76ff1Sjsg #include <linux/device.h>
321bb76ff1Sjsg #include <linux/module.h>
331bb76ff1Sjsg #include <linux/oom.h>
341bb76ff1Sjsg #include <linux/pci.h>
351bb76ff1Sjsg #include <linux/pm.h>
361bb76ff1Sjsg #include <linux/pm_runtime.h>
371bb76ff1Sjsg #include <linux/slab.h>
381bb76ff1Sjsg #include <linux/string_helpers.h>
391bb76ff1Sjsg #include <linux/vga_switcheroo.h>
401bb76ff1Sjsg #include <linux/vt.h>
411bb76ff1Sjsg 
421bb76ff1Sjsg #include <drm/drm_aperture.h>
431bb76ff1Sjsg #include <drm/drm_atomic_helper.h>
441bb76ff1Sjsg #include <drm/drm_ioctl.h>
451bb76ff1Sjsg #include <drm/drm_managed.h>
461bb76ff1Sjsg #include <drm/drm_probe_helper.h>
471bb76ff1Sjsg 
481bb76ff1Sjsg #include "display/intel_acpi.h"
491bb76ff1Sjsg #include "display/intel_bw.h"
501bb76ff1Sjsg #include "display/intel_cdclk.h"
51f005ef32Sjsg #include "display/intel_display_driver.h"
521bb76ff1Sjsg #include "display/intel_display_types.h"
531bb76ff1Sjsg #include "display/intel_dmc.h"
541bb76ff1Sjsg #include "display/intel_dp.h"
551bb76ff1Sjsg #include "display/intel_dpt.h"
561bb76ff1Sjsg #include "display/intel_fbdev.h"
571bb76ff1Sjsg #include "display/intel_hotplug.h"
581bb76ff1Sjsg #include "display/intel_overlay.h"
591bb76ff1Sjsg #include "display/intel_pch_refclk.h"
601bb76ff1Sjsg #include "display/intel_pipe_crc.h"
611bb76ff1Sjsg #include "display/intel_pps.h"
621bb76ff1Sjsg #include "display/intel_sprite.h"
631bb76ff1Sjsg #include "display/intel_vga.h"
641bb76ff1Sjsg #include "display/skl_watermark.h"
651bb76ff1Sjsg 
661bb76ff1Sjsg #include "gem/i915_gem_context.h"
671bb76ff1Sjsg #include "gem/i915_gem_create.h"
681bb76ff1Sjsg #include "gem/i915_gem_dmabuf.h"
691bb76ff1Sjsg #include "gem/i915_gem_ioctls.h"
701bb76ff1Sjsg #include "gem/i915_gem_mman.h"
711bb76ff1Sjsg #include "gem/i915_gem_pm.h"
721bb76ff1Sjsg #include "gt/intel_gt.h"
731bb76ff1Sjsg #include "gt/intel_gt_pm.h"
741bb76ff1Sjsg #include "gt/intel_rc6.h"
751bb76ff1Sjsg 
76f005ef32Sjsg #include "pxp/intel_pxp.h"
77f005ef32Sjsg #include "pxp/intel_pxp_debugfs.h"
781bb76ff1Sjsg #include "pxp/intel_pxp_pm.h"
791bb76ff1Sjsg 
80f005ef32Sjsg #include "soc/intel_dram.h"
81f005ef32Sjsg #include "soc/intel_gmch.h"
82f005ef32Sjsg 
831bb76ff1Sjsg #include "i915_debugfs.h"
841bb76ff1Sjsg #include "i915_driver.h"
851bb76ff1Sjsg #include "i915_drm_client.h"
861bb76ff1Sjsg #include "i915_drv.h"
87f005ef32Sjsg #include "i915_file_private.h"
881bb76ff1Sjsg #include "i915_getparam.h"
89f005ef32Sjsg #include "i915_hwmon.h"
901bb76ff1Sjsg #include "i915_ioc32.h"
911bb76ff1Sjsg #include "i915_ioctl.h"
921bb76ff1Sjsg #include "i915_irq.h"
931bb76ff1Sjsg #include "i915_memcpy.h"
941bb76ff1Sjsg #include "i915_perf.h"
951bb76ff1Sjsg #include "i915_query.h"
961bb76ff1Sjsg #include "i915_suspend.h"
971bb76ff1Sjsg #include "i915_switcheroo.h"
981bb76ff1Sjsg #include "i915_sysfs.h"
991bb76ff1Sjsg #include "i915_utils.h"
1001bb76ff1Sjsg #include "i915_vgpu.h"
101f005ef32Sjsg #include "intel_clock_gating.h"
1021bb76ff1Sjsg #include "intel_gvt.h"
1031bb76ff1Sjsg #include "intel_memory_region.h"
1041bb76ff1Sjsg #include "intel_pci_config.h"
1051bb76ff1Sjsg #include "intel_pcode.h"
1061bb76ff1Sjsg #include "intel_region_ttm.h"
1071bb76ff1Sjsg #include "vlv_suspend.h"
1081bb76ff1Sjsg 
1091bb76ff1Sjsg static const struct drm_driver i915_drm_driver;
1101bb76ff1Sjsg 
1111bb76ff1Sjsg static int i915_workqueues_init(struct drm_i915_private *dev_priv)
1121bb76ff1Sjsg {
1131bb76ff1Sjsg 	/*
1141bb76ff1Sjsg 	 * The i915 workqueue is primarily used for batched retirement of
1151bb76ff1Sjsg 	 * requests (and thus managing bo) once the task has been completed
1161bb76ff1Sjsg 	 * by the GPU. i915_retire_requests() is called directly when we
1171bb76ff1Sjsg 	 * need high-priority retirement, such as waiting for an explicit
1181bb76ff1Sjsg 	 * bo.
1191bb76ff1Sjsg 	 *
1201bb76ff1Sjsg 	 * It is also used for periodic low-priority events, such as
1211bb76ff1Sjsg 	 * idle-timers and recording error state.
1221bb76ff1Sjsg 	 *
1231bb76ff1Sjsg 	 * All tasks on the workqueue are expected to acquire the dev mutex
1241bb76ff1Sjsg 	 * so there is no point in running more than one instance of the
1251bb76ff1Sjsg 	 * workqueue at any time.  Use an ordered one.
1261bb76ff1Sjsg 	 */
1271bb76ff1Sjsg 	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1281bb76ff1Sjsg 	if (dev_priv->wq == NULL)
1291bb76ff1Sjsg 		goto out_err;
1301bb76ff1Sjsg 
1311bb76ff1Sjsg 	dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
1321bb76ff1Sjsg 	if (dev_priv->display.hotplug.dp_wq == NULL)
1331bb76ff1Sjsg 		goto out_free_wq;
1341bb76ff1Sjsg 
135f005ef32Sjsg 	/*
136f005ef32Sjsg 	 * The unordered i915 workqueue should be used for all work
137f005ef32Sjsg 	 * scheduling that do not require running in order, which used
138f005ef32Sjsg 	 * to be scheduled on the system_wq before moving to a driver
139f005ef32Sjsg 	 * instance due deprecation of flush_scheduled_work().
140f005ef32Sjsg 	 */
141f005ef32Sjsg 	dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
142f005ef32Sjsg 	if (dev_priv->unordered_wq == NULL)
143f005ef32Sjsg 		goto out_free_dp_wq;
144f005ef32Sjsg 
1451bb76ff1Sjsg 	return 0;
1461bb76ff1Sjsg 
147f005ef32Sjsg out_free_dp_wq:
148f005ef32Sjsg 	destroy_workqueue(dev_priv->display.hotplug.dp_wq);
1491bb76ff1Sjsg out_free_wq:
1501bb76ff1Sjsg 	destroy_workqueue(dev_priv->wq);
1511bb76ff1Sjsg out_err:
1521bb76ff1Sjsg 	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
1531bb76ff1Sjsg 
1541bb76ff1Sjsg 	return -ENOMEM;
1551bb76ff1Sjsg }
1561bb76ff1Sjsg 
1571bb76ff1Sjsg static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
1581bb76ff1Sjsg {
159f005ef32Sjsg 	destroy_workqueue(dev_priv->unordered_wq);
1601bb76ff1Sjsg 	destroy_workqueue(dev_priv->display.hotplug.dp_wq);
1611bb76ff1Sjsg 	destroy_workqueue(dev_priv->wq);
1621bb76ff1Sjsg }
1631bb76ff1Sjsg 
1641bb76ff1Sjsg /*
1651bb76ff1Sjsg  * We don't keep the workarounds for pre-production hardware, so we expect our
1661bb76ff1Sjsg  * driver to fail on these machines in one way or another. A little warning on
1671bb76ff1Sjsg  * dmesg may help both the user and the bug triagers.
1681bb76ff1Sjsg  *
1691bb76ff1Sjsg  * Our policy for removing pre-production workarounds is to keep the
1701bb76ff1Sjsg  * current gen workarounds as a guide to the bring-up of the next gen
1711bb76ff1Sjsg  * (workarounds have a habit of persisting!). Anything older than that
1721bb76ff1Sjsg  * should be removed along with the complications they introduce.
1731bb76ff1Sjsg  */
1741bb76ff1Sjsg static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
1751bb76ff1Sjsg {
1761bb76ff1Sjsg 	bool pre = false;
1771bb76ff1Sjsg 
178f005ef32Sjsg 	pre |= IS_HASWELL_EARLY_SDV(dev_priv);
1791bb76ff1Sjsg 	pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
1801bb76ff1Sjsg 	pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
1811bb76ff1Sjsg 	pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
1821bb76ff1Sjsg 	pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
1831bb76ff1Sjsg 	pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
184f005ef32Sjsg 	pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
185f005ef32Sjsg 	pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
1861bb76ff1Sjsg 
1871bb76ff1Sjsg 	if (pre) {
1881bb76ff1Sjsg 		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
1891bb76ff1Sjsg 			  "It may not be fully functional.\n");
1901bb76ff1Sjsg 		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
1911bb76ff1Sjsg 	}
1921bb76ff1Sjsg }
1931bb76ff1Sjsg 
1941bb76ff1Sjsg static void sanitize_gpu(struct drm_i915_private *i915)
1951bb76ff1Sjsg {
1961bb76ff1Sjsg 	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
1971bb76ff1Sjsg 		struct intel_gt *gt;
1981bb76ff1Sjsg 		unsigned int i;
1991bb76ff1Sjsg 
2001bb76ff1Sjsg 		for_each_gt(gt, i915, i)
2011bb76ff1Sjsg 			__intel_gt_reset(gt, ALL_ENGINES);
2021bb76ff1Sjsg 	}
2031bb76ff1Sjsg }
2041bb76ff1Sjsg 
/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 *
 * Returns 0 on success, -ENODEV on injected failure, or a negative errno
 * from one of the init steps (all prior steps are unwound on error).
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* Fault-injection point for probe-failure testing. */
	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init_early(dev_priv);

	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(dev_priv);

	/*
	 * OpenBSD lock setup: mtx_init/rw_init stand in for the
	 * spin_lock_init/mutex_init calls of the upstream Linux driver.
	 */
	mtx_init(&dev_priv->irq_lock, IPL_TTY);
	mtx_init(&dev_priv->gpu_error.lock, IPL_TTY);
	rw_init(&dev_priv->display.backlight.lock, "blight");

	rw_init(&dev_priv->sb_lock, "sb");
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	rw_init(&dev_priv->display.audio.mutex, "daud");
	rw_init(&dev_priv->display.wm.wm_mutex, "wmm");
	rw_init(&dev_priv->display.pps.mutex, "ppsm");
	rw_init(&dev_priv->display.hdcp.hdcp_mutex, "hdcpc");

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

#ifdef __OpenBSD__
	/* Copy the softc's bus-space and DMA tags into the TTM device. */
	dev_priv->bdev.iot = dev_priv->iot;
	dev_priv->bdev.memt = dev_priv->bst;
	dev_priv->bdev.dmat = dev_priv->dmat;
#endif

	ret = intel_region_ttm_device_init(dev_priv);
	if (ret)
		goto err_ttm;

	ret = intel_root_gt_init_early(dev_priv);
	if (ret < 0)
		goto err_rootgt;

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_irq_init(dev_priv);
	intel_display_driver_early_probe(dev_priv);
	intel_clock_gating_hooks_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

	/* Unwind in reverse order of the successful steps above. */
err_rootgt:
	intel_region_ttm_device_fini(dev_priv);
err_ttm:
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}
2861bb76ff1Sjsg 
/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 *
 * Teardown runs in roughly the reverse order of the early-probe setup.
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	intel_region_ttm_device_fini(dev_priv);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	/* Release the device-private copy of the module parameters. */
	i915_params_free(&dev_priv->params);
}
3071bb76ff1Sjsg 
/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	int ret, i;

	/* Fault-injection point for probe-failure testing. */
	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	ret = intel_gmch_bridge_setup(dev_priv);
	if (ret < 0)
		return ret;

	for_each_gt(gt, dev_priv, i) {
		ret = intel_uncore_init_mmio(gt->uncore);
		if (ret)
			return ret;

		/*
		 * Uncore mmio teardown is registered as a drm-managed
		 * action, so the error paths below must not (and do not)
		 * undo it by hand.
		 */
		ret = drmm_add_action_or_reset(&dev_priv->drm,
					       intel_uncore_fini_mmio,
					       gt->uncore);
		if (ret)
			return ret;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_gmch_bar_setup(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init_mmio(gt);
		if (ret)
			goto err_uncore;
	}

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_gmch_bar_teardown(dev_priv);

	return ret;
}
3611bb76ff1Sjsg 
/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 *
 * Only the MCHBAR needs explicit teardown here; the per-GT uncore mmio is
 * released via the drm-managed action registered during the probe.
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_gmch_bar_teardown(dev_priv);
}
3701bb76ff1Sjsg 
3711bb76ff1Sjsg /**
3721bb76ff1Sjsg  * i915_set_dma_info - set all relevant PCI dma info as configured for the
3731bb76ff1Sjsg  * platform
3741bb76ff1Sjsg  * @i915: valid i915 instance
3751bb76ff1Sjsg  *
3761bb76ff1Sjsg  * Set the dma max segment size, device and coherent masks.  The dma mask set
3771bb76ff1Sjsg  * needs to occur before i915_ggtt_probe_hw.
3781bb76ff1Sjsg  *
3791bb76ff1Sjsg  * A couple of platforms have special needs.  Address them as well.
3801bb76ff1Sjsg  *
3811bb76ff1Sjsg  */
3821bb76ff1Sjsg static int i915_set_dma_info(struct drm_i915_private *i915)
3831bb76ff1Sjsg {
3841bb76ff1Sjsg 	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
3851bb76ff1Sjsg 	int ret;
3861bb76ff1Sjsg 
3871bb76ff1Sjsg 	GEM_BUG_ON(!mask_size);
3881bb76ff1Sjsg 
3891bb76ff1Sjsg 	/*
3901bb76ff1Sjsg 	 * We don't have a max segment size, so set it to the max so sg's
3911bb76ff1Sjsg 	 * debugging layer doesn't complain
3921bb76ff1Sjsg 	 */
3931bb76ff1Sjsg 	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
3941bb76ff1Sjsg 
3951bb76ff1Sjsg 	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
3961bb76ff1Sjsg 	if (ret)
3971bb76ff1Sjsg 		goto mask_err;
3981bb76ff1Sjsg 
3991bb76ff1Sjsg 	/* overlay on gen2 is broken and can't address above 1G */
4001bb76ff1Sjsg 	if (GRAPHICS_VER(i915) == 2)
4011bb76ff1Sjsg 		mask_size = 30;
4021bb76ff1Sjsg 
4031bb76ff1Sjsg 	/*
4041bb76ff1Sjsg 	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
4051bb76ff1Sjsg 	 * using 32bit addressing, overwriting memory if HWS is located
4061bb76ff1Sjsg 	 * above 4GB.
4071bb76ff1Sjsg 	 *
4081bb76ff1Sjsg 	 * The documentation also mentions an issue with undefined
4091bb76ff1Sjsg 	 * behaviour if any general state is accessed within a page above 4GB,
4101bb76ff1Sjsg 	 * which also needs to be handled carefully.
4111bb76ff1Sjsg 	 */
4121bb76ff1Sjsg 	if (IS_I965G(i915) || IS_I965GM(i915))
4131bb76ff1Sjsg 		mask_size = 32;
4141bb76ff1Sjsg 
4151bb76ff1Sjsg 	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
4161bb76ff1Sjsg 	if (ret)
4171bb76ff1Sjsg 		goto mask_err;
4181bb76ff1Sjsg 
4191bb76ff1Sjsg 	return 0;
4201bb76ff1Sjsg 
4211bb76ff1Sjsg mask_err:
4221bb76ff1Sjsg 	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
4231bb76ff1Sjsg 	return ret;
4241bb76ff1Sjsg }
4251bb76ff1Sjsg 
4261bb76ff1Sjsg static int i915_pcode_init(struct drm_i915_private *i915)
4271bb76ff1Sjsg {
4281bb76ff1Sjsg 	struct intel_gt *gt;
4291bb76ff1Sjsg 	int id, ret;
4301bb76ff1Sjsg 
4311bb76ff1Sjsg 	for_each_gt(gt, i915, id) {
4321bb76ff1Sjsg 		ret = intel_pcode_init(gt->uncore);
4331bb76ff1Sjsg 		if (ret) {
4341bb76ff1Sjsg 			drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
4351bb76ff1Sjsg 			return ret;
4361bb76ff1Sjsg 		}
4371bb76ff1Sjsg 	}
4381bb76ff1Sjsg 
4391bb76ff1Sjsg 	return 0;
4401bb76ff1Sjsg }
4411bb76ff1Sjsg 
/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 *
 * Returns 0 on success or a negative errno; on error every step completed
 * so far is unwound via the goto chain at the bottom.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* Fault-injection point for probe-failure testing. */
	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	/* Reject vGPU hosts that cannot provide isolated per-context ppGTT. */
	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	ret = i915_perf_init(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/* Kick out any firmware framebuffer (e.g. efifb) using our aperture. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	/*
	 * Make sure we probe lmem before we probe stolen-lmem. The BAR size
	 * might be different due to bar resizing.
	 */
	ret = intel_gt_tiles_init(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		goto err_opregion;

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

	/* Error unwind: reverse order of the setup steps above. */
err_opregion:
	intel_opregion_cleanup(dev_priv);
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}
5821bb76ff1Sjsg 
/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 *
 * Undoes the tail end of the hw probe: perf, opregion and MSI.
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	intel_opregion_cleanup(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}
5981bb76ff1Sjsg 
/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(&dev_priv->drm, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		/* Bail early: everything below depends on a registered dev. */
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_register(gt);

	intel_pxp_debugfs_register(dev_priv->pxp);

	i915_hwmon_register(dev_priv);

	intel_display_driver_register(dev_priv);

	/* Power management goes live last, once everything is exposed. */
	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}
6461bb76ff1Sjsg 
/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 *
 * Teardown runs in roughly the reverse order of i915_driver_register().
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_pxp_fini(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_unregister(gt);

	i915_hwmon_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	/* Unplug blocks new userspace access before the final teardown. */
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}
6801bb76ff1Sjsg 
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	/* Report whether VT-d / IOMMU translation is active for the GPU. */
	const char *status = str_enabled_disabled(i915_vtd_active(i915));

	drm_printf(p, "iommu: %s\n", status);
}
6871bb76ff1Sjsg 
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	/* Dump the full device/display/gt capability info, but only when
	 * driver-level debugging output is enabled. */
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");
		struct intel_gt *gt;
		unsigned int i;

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print(INTEL_INFO(dev_priv),
					RUNTIME_INFO(dev_priv), &p);
		intel_display_device_info_print(DISPLAY_INFO(dev_priv),
						DISPLAY_RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		for_each_gt(gt, dev_priv, i)
			intel_gt_info_print(&gt->info, &p);
	}

	/* Unconditionally announce debug-heavy build options in dmesg. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
7201bb76ff1Sjsg 
7211bb76ff1Sjsg #ifdef __linux__
7221bb76ff1Sjsg 
/*
 * Allocate and minimally initialize the drm_i915_private for @pdev.
 * Linux-only path (the surrounding #ifdef __linux__); on OpenBSD the
 * softc is created by autoconf instead.
 *
 * Returns the new instance or an ERR_PTR on allocation failure.
 */
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;

	/* devm-managed: freed automatically when the device goes away. */
	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Set up device info and initial runtime info. */
	intel_device_info_driver_create(i915, pdev->device, match_info);

	return i915;
}
7451bb76ff1Sjsg 
746f005ef32Sjsg #endif
7471bb76ff1Sjsg 
7481bb76ff1Sjsg void inteldrm_init_backlight(struct inteldrm_softc *);
7491bb76ff1Sjsg 
7501bb76ff1Sjsg /**
7511bb76ff1Sjsg  * i915_driver_probe - setup chip and create an initial config
7521bb76ff1Sjsg  * @pdev: PCI device
7531bb76ff1Sjsg  * @ent: matching PCI ID entry
7541bb76ff1Sjsg  *
7551bb76ff1Sjsg  * The driver probe routine has to do several things:
756f005ef32Sjsg  *   - drive output discovery via intel_display_driver_probe()
7571bb76ff1Sjsg  *   - initialize the memory manager
7581bb76ff1Sjsg  *   - allocate initial config memory
7591bb76ff1Sjsg  *   - setup the DRM framebuffer with the allocated memory
7601bb76ff1Sjsg  */
7611bb76ff1Sjsg int i915_driver_probe(struct drm_i915_private *i915, const struct pci_device_id *ent)
7621bb76ff1Sjsg {
763f005ef32Sjsg #ifdef __linux__
764f005ef32Sjsg 	struct drm_i915_private *i915;
7651bb76ff1Sjsg 	int ret;
7661bb76ff1Sjsg 
7671bb76ff1Sjsg 	ret = pci_enable_device(pdev);
768f005ef32Sjsg 	if (ret) {
769f005ef32Sjsg 		pr_err("Failed to enable graphics device: %pe\n", ERR_PTR(ret));
770f005ef32Sjsg 		return ret;
771f005ef32Sjsg 	}
772f005ef32Sjsg 
773f005ef32Sjsg 	i915 = i915_driver_create(pdev, ent);
774f005ef32Sjsg 	if (IS_ERR(i915)) {
775f005ef32Sjsg 		pci_disable_device(pdev);
776f005ef32Sjsg 		return PTR_ERR(i915);
777f005ef32Sjsg 	}
778f005ef32Sjsg #else
779f005ef32Sjsg 	struct pci_dev *pdev = i915->drm.pdev;
780f005ef32Sjsg 	int ret;
781f005ef32Sjsg #endif
7821bb76ff1Sjsg 
7831bb76ff1Sjsg 	ret = i915_driver_early_probe(i915);
7841bb76ff1Sjsg 	if (ret < 0)
7851bb76ff1Sjsg 		goto out_pci_disable;
7861bb76ff1Sjsg 
7871bb76ff1Sjsg 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
7881bb76ff1Sjsg 
7891bb76ff1Sjsg 	intel_vgpu_detect(i915);
7901bb76ff1Sjsg 
7911bb76ff1Sjsg 	ret = intel_gt_probe_all(i915);
7921bb76ff1Sjsg 	if (ret < 0)
7931bb76ff1Sjsg 		goto out_runtime_pm_put;
7941bb76ff1Sjsg 
7951bb76ff1Sjsg 	ret = i915_driver_mmio_probe(i915);
7961bb76ff1Sjsg 	if (ret < 0)
7978a534fe9Sjsg 		goto out_runtime_pm_put;
7981bb76ff1Sjsg 
7991bb76ff1Sjsg 	ret = i915_driver_hw_probe(i915);
8001bb76ff1Sjsg 	if (ret < 0)
8011bb76ff1Sjsg 		goto out_cleanup_mmio;
8021bb76ff1Sjsg 
803f005ef32Sjsg 	ret = intel_display_driver_probe_noirq(i915);
8041bb76ff1Sjsg 	if (ret < 0)
8051bb76ff1Sjsg 		goto out_cleanup_hw;
8061bb76ff1Sjsg 
8071bb76ff1Sjsg 	ret = intel_irq_install(i915);
8081bb76ff1Sjsg 	if (ret)
8091bb76ff1Sjsg 		goto out_cleanup_modeset;
8101bb76ff1Sjsg 
811f005ef32Sjsg 	ret = intel_display_driver_probe_nogem(i915);
8121bb76ff1Sjsg 	if (ret)
8131bb76ff1Sjsg 		goto out_cleanup_irq;
8141bb76ff1Sjsg 
8151bb76ff1Sjsg 	ret = i915_gem_init(i915);
8161bb76ff1Sjsg 	if (ret)
8171bb76ff1Sjsg 		goto out_cleanup_modeset2;
8181bb76ff1Sjsg 
819f005ef32Sjsg 	intel_pxp_init(i915);
820f005ef32Sjsg 
821f005ef32Sjsg 	ret = intel_display_driver_probe(i915);
8221bb76ff1Sjsg 	if (ret)
8231bb76ff1Sjsg 		goto out_cleanup_gem;
8241bb76ff1Sjsg 
8251bb76ff1Sjsg 	i915_driver_register(i915);
8261bb76ff1Sjsg 
827f005ef32Sjsg #ifdef __OpenBSD__
8281bb76ff1Sjsg 	inteldrm_init_backlight(i915);
829f005ef32Sjsg #endif
8301bb76ff1Sjsg 
8311bb76ff1Sjsg 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
8321bb76ff1Sjsg 
8331bb76ff1Sjsg 	i915_welcome_messages(i915);
8341bb76ff1Sjsg 
8351bb76ff1Sjsg 	i915->do_release = true;
8361bb76ff1Sjsg 
8371bb76ff1Sjsg 	return 0;
8381bb76ff1Sjsg 
8391bb76ff1Sjsg out_cleanup_gem:
8401bb76ff1Sjsg 	i915_gem_suspend(i915);
8411bb76ff1Sjsg 	i915_gem_driver_remove(i915);
8421bb76ff1Sjsg 	i915_gem_driver_release(i915);
8431bb76ff1Sjsg out_cleanup_modeset2:
8441bb76ff1Sjsg 	/* FIXME clean up the error path */
845f005ef32Sjsg 	intel_display_driver_remove(i915);
8461bb76ff1Sjsg 	intel_irq_uninstall(i915);
847f005ef32Sjsg 	intel_display_driver_remove_noirq(i915);
8481bb76ff1Sjsg 	goto out_cleanup_modeset;
8491bb76ff1Sjsg out_cleanup_irq:
8501bb76ff1Sjsg 	intel_irq_uninstall(i915);
8511bb76ff1Sjsg out_cleanup_modeset:
852f005ef32Sjsg 	intel_display_driver_remove_nogem(i915);
8531bb76ff1Sjsg out_cleanup_hw:
8541bb76ff1Sjsg 	i915_driver_hw_remove(i915);
8551bb76ff1Sjsg 	intel_memory_regions_driver_release(i915);
8561bb76ff1Sjsg 	i915_ggtt_driver_release(i915);
8571bb76ff1Sjsg 	i915_gem_drain_freed_objects(i915);
8581bb76ff1Sjsg 	i915_ggtt_driver_late_release(i915);
8591bb76ff1Sjsg out_cleanup_mmio:
8601bb76ff1Sjsg 	i915_driver_mmio_release(i915);
8611bb76ff1Sjsg out_runtime_pm_put:
8621bb76ff1Sjsg 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
8631bb76ff1Sjsg 	i915_driver_late_release(i915);
8641bb76ff1Sjsg out_pci_disable:
8651bb76ff1Sjsg 	pci_disable_device(pdev);
8661bb76ff1Sjsg 	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
8671bb76ff1Sjsg 	return ret;
8681bb76ff1Sjsg }
8691bb76ff1Sjsg 
8701bb76ff1Sjsg void i915_driver_remove(struct drm_i915_private *i915)
8711bb76ff1Sjsg {
8721bb76ff1Sjsg 	intel_wakeref_t wakeref;
8731bb76ff1Sjsg 
8741bb76ff1Sjsg 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
8751bb76ff1Sjsg 
8761bb76ff1Sjsg 	i915_driver_unregister(i915);
8771bb76ff1Sjsg 
8781bb76ff1Sjsg 	/* Flush any external code that still may be under the RCU lock */
8791bb76ff1Sjsg 	synchronize_rcu();
8801bb76ff1Sjsg 
8811bb76ff1Sjsg 	i915_gem_suspend(i915);
8821bb76ff1Sjsg 
8831bb76ff1Sjsg 	intel_gvt_driver_remove(i915);
8841bb76ff1Sjsg 
885f005ef32Sjsg 	intel_display_driver_remove(i915);
8861bb76ff1Sjsg 
8871bb76ff1Sjsg 	intel_irq_uninstall(i915);
8881bb76ff1Sjsg 
889f005ef32Sjsg 	intel_display_driver_remove_noirq(i915);
8901bb76ff1Sjsg 
8911bb76ff1Sjsg 	i915_reset_error_state(i915);
8921bb76ff1Sjsg 	i915_gem_driver_remove(i915);
8931bb76ff1Sjsg 
894f005ef32Sjsg 	intel_display_driver_remove_nogem(i915);
8951bb76ff1Sjsg 
8961bb76ff1Sjsg 	i915_driver_hw_remove(i915);
8971bb76ff1Sjsg 
8981bb76ff1Sjsg 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
8991bb76ff1Sjsg }
9001bb76ff1Sjsg 
9011bb76ff1Sjsg static void i915_driver_release(struct drm_device *dev)
9021bb76ff1Sjsg {
9031bb76ff1Sjsg 	struct drm_i915_private *dev_priv = to_i915(dev);
9041bb76ff1Sjsg 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
9051bb76ff1Sjsg 	intel_wakeref_t wakeref;
9061bb76ff1Sjsg 
9071bb76ff1Sjsg 	if (!dev_priv->do_release)
9081bb76ff1Sjsg 		return;
9091bb76ff1Sjsg 
9101bb76ff1Sjsg 	wakeref = intel_runtime_pm_get(rpm);
9111bb76ff1Sjsg 
9121bb76ff1Sjsg 	i915_gem_driver_release(dev_priv);
9131bb76ff1Sjsg 
9141bb76ff1Sjsg 	intel_memory_regions_driver_release(dev_priv);
9151bb76ff1Sjsg 	i915_ggtt_driver_release(dev_priv);
9161bb76ff1Sjsg 	i915_gem_drain_freed_objects(dev_priv);
9171bb76ff1Sjsg 	i915_ggtt_driver_late_release(dev_priv);
9181bb76ff1Sjsg 
9191bb76ff1Sjsg 	i915_driver_mmio_release(dev_priv);
9201bb76ff1Sjsg 
9211bb76ff1Sjsg 	intel_runtime_pm_put(rpm, wakeref);
9221bb76ff1Sjsg 
9231bb76ff1Sjsg 	intel_runtime_pm_driver_release(rpm);
9241bb76ff1Sjsg 
9251bb76ff1Sjsg 	i915_driver_late_release(dev_priv);
9261bb76ff1Sjsg }
9271bb76ff1Sjsg 
9281bb76ff1Sjsg static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
9291bb76ff1Sjsg {
9301bb76ff1Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
9311bb76ff1Sjsg 	int ret;
9321bb76ff1Sjsg 
9331bb76ff1Sjsg 	ret = i915_gem_open(i915, file);
9341bb76ff1Sjsg 	if (ret)
9351bb76ff1Sjsg 		return ret;
9361bb76ff1Sjsg 
9371bb76ff1Sjsg 	return 0;
9381bb76ff1Sjsg }
9391bb76ff1Sjsg 
9401bb76ff1Sjsg /**
9411bb76ff1Sjsg  * i915_driver_lastclose - clean up after all DRM clients have exited
9421bb76ff1Sjsg  * @dev: DRM device
9431bb76ff1Sjsg  *
9441bb76ff1Sjsg  * Take care of cleaning up after all DRM clients have exited.  In the
9451bb76ff1Sjsg  * mode setting case, we want to restore the kernel's initial mode (just
9461bb76ff1Sjsg  * in case the last client left us in a bad state).
9471bb76ff1Sjsg  *
9481bb76ff1Sjsg  * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
9501bb76ff1Sjsg  * up any GEM state.
9511bb76ff1Sjsg  */
9521bb76ff1Sjsg static void i915_driver_lastclose(struct drm_device *dev)
9531bb76ff1Sjsg {
954f005ef32Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
955f005ef32Sjsg 
956f005ef32Sjsg 	intel_fbdev_restore_mode(i915);
9571bb76ff1Sjsg 
9581bb76ff1Sjsg 	vga_switcheroo_process_delayed_switch();
9591bb76ff1Sjsg }
9601bb76ff1Sjsg 
9611bb76ff1Sjsg static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
9621bb76ff1Sjsg {
9631bb76ff1Sjsg 	struct drm_i915_file_private *file_priv = file->driver_priv;
9641bb76ff1Sjsg 
9651bb76ff1Sjsg 	i915_gem_context_close(file);
9661bb76ff1Sjsg 	i915_drm_client_put(file_priv->client);
9671bb76ff1Sjsg 
9681bb76ff1Sjsg 	kfree_rcu(file_priv, rcu);
9691bb76ff1Sjsg 
9701bb76ff1Sjsg 	/* Catch up with all the deferred frees from "this" client */
9711bb76ff1Sjsg 	i915_gem_flush_free_objects(to_i915(dev));
9721bb76ff1Sjsg }
9731bb76ff1Sjsg 
9741bb76ff1Sjsg static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
9751bb76ff1Sjsg {
9761bb76ff1Sjsg 	struct intel_encoder *encoder;
9771bb76ff1Sjsg 
9781bb76ff1Sjsg 	if (!HAS_DISPLAY(dev_priv))
9791bb76ff1Sjsg 		return;
9801bb76ff1Sjsg 
981f005ef32Sjsg 	/*
982f005ef32Sjsg 	 * TODO: check and remove holding the modeset locks if none of
983f005ef32Sjsg 	 * the encoders depends on this.
984f005ef32Sjsg 	 */
985f005ef32Sjsg 	drm_modeset_lock_all(&dev_priv->drm);
986f005ef32Sjsg 	for_each_intel_encoder(&dev_priv->drm, encoder)
9871bb76ff1Sjsg 		if (encoder->suspend)
9881bb76ff1Sjsg 			encoder->suspend(encoder);
989f005ef32Sjsg 	drm_modeset_unlock_all(&dev_priv->drm);
990f005ef32Sjsg 
991f005ef32Sjsg 	for_each_intel_encoder(&dev_priv->drm, encoder)
992f005ef32Sjsg 		if (encoder->suspend_complete)
993f005ef32Sjsg 			encoder->suspend_complete(encoder);
9941bb76ff1Sjsg }
9951bb76ff1Sjsg 
9961bb76ff1Sjsg static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
9971bb76ff1Sjsg {
9981bb76ff1Sjsg 	struct intel_encoder *encoder;
9991bb76ff1Sjsg 
10001bb76ff1Sjsg 	if (!HAS_DISPLAY(dev_priv))
10011bb76ff1Sjsg 		return;
10021bb76ff1Sjsg 
1003f005ef32Sjsg 	/*
1004f005ef32Sjsg 	 * TODO: check and remove holding the modeset locks if none of
1005f005ef32Sjsg 	 * the encoders depends on this.
1006f005ef32Sjsg 	 */
1007f005ef32Sjsg 	drm_modeset_lock_all(&dev_priv->drm);
1008f005ef32Sjsg 	for_each_intel_encoder(&dev_priv->drm, encoder)
10091bb76ff1Sjsg 		if (encoder->shutdown)
10101bb76ff1Sjsg 			encoder->shutdown(encoder);
1011f005ef32Sjsg 	drm_modeset_unlock_all(&dev_priv->drm);
1012f005ef32Sjsg 
1013f005ef32Sjsg 	for_each_intel_encoder(&dev_priv->drm, encoder)
1014f005ef32Sjsg 		if (encoder->shutdown_complete)
1015f005ef32Sjsg 			encoder->shutdown_complete(encoder);
10161bb76ff1Sjsg }
10171bb76ff1Sjsg 
10181bb76ff1Sjsg void i915_driver_shutdown(struct drm_i915_private *i915)
10191bb76ff1Sjsg {
10201bb76ff1Sjsg 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
10211bb76ff1Sjsg 	intel_runtime_pm_disable(&i915->runtime_pm);
10221bb76ff1Sjsg 	intel_power_domains_disable(i915);
10231bb76ff1Sjsg 
10241bb76ff1Sjsg 	if (HAS_DISPLAY(i915)) {
10251bb76ff1Sjsg 		drm_kms_helper_poll_disable(&i915->drm);
10261bb76ff1Sjsg 
10271bb76ff1Sjsg 		drm_atomic_helper_shutdown(&i915->drm);
10281bb76ff1Sjsg 	}
10291bb76ff1Sjsg 
10301bb76ff1Sjsg 	intel_dp_mst_suspend(i915);
10311bb76ff1Sjsg 
10321bb76ff1Sjsg 	intel_runtime_pm_disable_interrupts(i915);
10331bb76ff1Sjsg 	intel_hpd_cancel_work(i915);
10341bb76ff1Sjsg 
10351bb76ff1Sjsg 	intel_suspend_encoders(i915);
10361bb76ff1Sjsg 	intel_shutdown_encoders(i915);
10371bb76ff1Sjsg 
1038f005ef32Sjsg 	intel_dmc_suspend(i915);
10391bb76ff1Sjsg 
10401bb76ff1Sjsg 	i915_gem_suspend(i915);
10411bb76ff1Sjsg 
10421bb76ff1Sjsg 	/*
10431bb76ff1Sjsg 	 * The only requirement is to reboot with display DC states disabled,
10441bb76ff1Sjsg 	 * for now leaving all display power wells in the INIT power domain
10451bb76ff1Sjsg 	 * enabled.
10461bb76ff1Sjsg 	 *
10471bb76ff1Sjsg 	 * TODO:
10481bb76ff1Sjsg 	 * - unify the pci_driver::shutdown sequence here with the
10491bb76ff1Sjsg 	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
10501bb76ff1Sjsg 	 * - unify the driver remove and system/runtime suspend sequences with
10511bb76ff1Sjsg 	 *   the above unified shutdown/poweroff sequence.
10521bb76ff1Sjsg 	 */
10531bb76ff1Sjsg 	intel_power_domains_driver_remove(i915);
10541bb76ff1Sjsg 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
10551bb76ff1Sjsg 
10561bb76ff1Sjsg 	intel_runtime_pm_driver_release(&i915->runtime_pm);
10571bb76ff1Sjsg }
10581bb76ff1Sjsg 
10591bb76ff1Sjsg static bool suspend_to_idle(struct drm_i915_private *dev_priv)
10601bb76ff1Sjsg {
10611bb76ff1Sjsg #if IS_ENABLED(CONFIG_ACPI_SLEEP)
10621bb76ff1Sjsg 	if (acpi_target_system_state() < ACPI_STATE_S3)
10631bb76ff1Sjsg 		return true;
10641bb76ff1Sjsg #endif
10651bb76ff1Sjsg 	return false;
10661bb76ff1Sjsg }
10671bb76ff1Sjsg 
1068f005ef32Sjsg static void i915_drm_complete(struct drm_device *dev)
1069f005ef32Sjsg {
1070f005ef32Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
1071f005ef32Sjsg 
1072f005ef32Sjsg 	intel_pxp_resume_complete(i915->pxp);
1073f005ef32Sjsg }
1074f005ef32Sjsg 
10751bb76ff1Sjsg static int i915_drm_prepare(struct drm_device *dev)
10761bb76ff1Sjsg {
10771bb76ff1Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
10781bb76ff1Sjsg 
1079f005ef32Sjsg 	intel_pxp_suspend_prepare(i915->pxp);
1080f005ef32Sjsg 
10811bb76ff1Sjsg 	/*
1082f005ef32Sjsg 	 * NB intel_display_driver_suspend() may issue new requests after we've
10831bb76ff1Sjsg 	 * ostensibly marked the GPU as ready-to-sleep here. We need to
10841bb76ff1Sjsg 	 * split out that work and pull it forward so that after point,
10851bb76ff1Sjsg 	 * the GPU is not woken again.
10861bb76ff1Sjsg 	 */
10871bb76ff1Sjsg 	return i915_gem_backup_suspend(i915);
10881bb76ff1Sjsg }
10891bb76ff1Sjsg 
10901bb76ff1Sjsg static int i915_drm_suspend(struct drm_device *dev)
10911bb76ff1Sjsg {
10921bb76ff1Sjsg 	struct drm_i915_private *dev_priv = to_i915(dev);
10931bb76ff1Sjsg 	struct pci_dev *pdev = dev_priv->drm.pdev;
10941bb76ff1Sjsg 	pci_power_t opregion_target_state;
10951bb76ff1Sjsg 
10961bb76ff1Sjsg 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
10971bb76ff1Sjsg 
10981bb76ff1Sjsg 	/* We do a lot of poking in a lot of registers, make sure they work
10991bb76ff1Sjsg 	 * properly. */
11001bb76ff1Sjsg 	intel_power_domains_disable(dev_priv);
11011bb76ff1Sjsg 	if (HAS_DISPLAY(dev_priv))
11021bb76ff1Sjsg 		drm_kms_helper_poll_disable(dev);
11031bb76ff1Sjsg 
11041bb76ff1Sjsg 	pci_save_state(pdev);
11051bb76ff1Sjsg 
1106f005ef32Sjsg 	intel_display_driver_suspend(dev_priv);
11071bb76ff1Sjsg 
11081bb76ff1Sjsg 	intel_dp_mst_suspend(dev_priv);
11091bb76ff1Sjsg 
11101bb76ff1Sjsg 	intel_runtime_pm_disable_interrupts(dev_priv);
11111bb76ff1Sjsg 	intel_hpd_cancel_work(dev_priv);
11121bb76ff1Sjsg 
11131bb76ff1Sjsg 	intel_suspend_encoders(dev_priv);
11141bb76ff1Sjsg 
11151bb76ff1Sjsg 	/* Must be called before GGTT is suspended. */
11161bb76ff1Sjsg 	intel_dpt_suspend(dev_priv);
11171bb76ff1Sjsg 	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
11181bb76ff1Sjsg 
11191bb76ff1Sjsg 	i915_save_display(dev_priv);
11201bb76ff1Sjsg 
11211bb76ff1Sjsg 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
11221bb76ff1Sjsg 	intel_opregion_suspend(dev_priv, opregion_target_state);
11231bb76ff1Sjsg 
11241bb76ff1Sjsg 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
11251bb76ff1Sjsg 
11261bb76ff1Sjsg 	dev_priv->suspend_count++;
11271bb76ff1Sjsg 
1128f005ef32Sjsg 	intel_dmc_suspend(dev_priv);
11291bb76ff1Sjsg 
11301bb76ff1Sjsg 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
11311bb76ff1Sjsg 
11321bb76ff1Sjsg 	i915_gem_drain_freed_objects(dev_priv);
11331bb76ff1Sjsg 
11341bb76ff1Sjsg 	return 0;
11351bb76ff1Sjsg }
11361bb76ff1Sjsg 
11371bb76ff1Sjsg static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
11381bb76ff1Sjsg {
11391bb76ff1Sjsg 	struct drm_i915_private *dev_priv = to_i915(dev);
11401bb76ff1Sjsg 	struct pci_dev *pdev = dev_priv->drm.pdev;
11411bb76ff1Sjsg 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
11421bb76ff1Sjsg 	struct intel_gt *gt;
11431bb76ff1Sjsg 	int ret, i;
1144f005ef32Sjsg 	bool s2idle = !hibernation && suspend_to_idle(dev_priv);
11451bb76ff1Sjsg 
11461bb76ff1Sjsg 	disable_rpm_wakeref_asserts(rpm);
11471bb76ff1Sjsg 
1148f005ef32Sjsg 	intel_pxp_suspend(dev_priv->pxp);
1149f005ef32Sjsg 
11501bb76ff1Sjsg 	i915_gem_suspend_late(dev_priv);
11511bb76ff1Sjsg 
11521bb76ff1Sjsg 	for_each_gt(gt, dev_priv, i)
11531bb76ff1Sjsg 		intel_uncore_suspend(gt->uncore);
11541bb76ff1Sjsg 
1155f005ef32Sjsg 	intel_power_domains_suspend(dev_priv, s2idle);
11561bb76ff1Sjsg 
11571bb76ff1Sjsg 	intel_display_power_suspend_late(dev_priv);
11581bb76ff1Sjsg 
11591bb76ff1Sjsg 	ret = vlv_suspend_complete(dev_priv);
11601bb76ff1Sjsg 	if (ret) {
11611bb76ff1Sjsg 		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
11621bb76ff1Sjsg 		intel_power_domains_resume(dev_priv);
11631bb76ff1Sjsg 
11641bb76ff1Sjsg 		goto out;
11651bb76ff1Sjsg 	}
11661bb76ff1Sjsg 
11671bb76ff1Sjsg 	pci_disable_device(pdev);
11681bb76ff1Sjsg 	/*
11691bb76ff1Sjsg 	 * During hibernation on some platforms the BIOS may try to access
11701bb76ff1Sjsg 	 * the device even though it's already in D3 and hang the machine. So
11711bb76ff1Sjsg 	 * leave the device in D0 on those platforms and hope the BIOS will
11721bb76ff1Sjsg 	 * power down the device properly. The issue was seen on multiple old
11731bb76ff1Sjsg 	 * GENs with different BIOS vendors, so having an explicit blacklist
11741bb76ff1Sjsg 	 * is inpractical; apply the workaround on everything pre GEN6. The
11751bb76ff1Sjsg 	 * platforms where the issue was seen:
11761bb76ff1Sjsg 	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
11771bb76ff1Sjsg 	 * Fujitsu FSC S7110
11781bb76ff1Sjsg 	 * Acer Aspire 1830T
11791bb76ff1Sjsg 	 */
11801bb76ff1Sjsg 	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
11811bb76ff1Sjsg 		pci_set_power_state(pdev, PCI_D3hot);
11821bb76ff1Sjsg 
11831bb76ff1Sjsg out:
11841bb76ff1Sjsg 	enable_rpm_wakeref_asserts(rpm);
11851bb76ff1Sjsg 	if (!dev_priv->uncore.user_forcewake_count)
11861bb76ff1Sjsg 		intel_runtime_pm_driver_release(rpm);
11871bb76ff1Sjsg 
11881bb76ff1Sjsg 	return ret;
11891bb76ff1Sjsg }
11901bb76ff1Sjsg 
11911bb76ff1Sjsg #ifdef __linux__
11921bb76ff1Sjsg int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
11931bb76ff1Sjsg 				   pm_message_t state)
11941bb76ff1Sjsg {
11951bb76ff1Sjsg 	int error;
11961bb76ff1Sjsg 
11971bb76ff1Sjsg 	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
11981bb76ff1Sjsg 			     state.event != PM_EVENT_FREEZE))
11991bb76ff1Sjsg 		return -EINVAL;
12001bb76ff1Sjsg 
12011bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
12021bb76ff1Sjsg 		return 0;
12031bb76ff1Sjsg 
12041bb76ff1Sjsg 	error = i915_drm_suspend(&i915->drm);
12051bb76ff1Sjsg 	if (error)
12061bb76ff1Sjsg 		return error;
12071bb76ff1Sjsg 
12081bb76ff1Sjsg 	return i915_drm_suspend_late(&i915->drm, false);
12091bb76ff1Sjsg }
12101bb76ff1Sjsg #endif
12111bb76ff1Sjsg 
12121bb76ff1Sjsg static int i915_drm_resume(struct drm_device *dev)
12131bb76ff1Sjsg {
12141bb76ff1Sjsg 	struct drm_i915_private *dev_priv = to_i915(dev);
1215f005ef32Sjsg 	struct intel_gt *gt;
1216f005ef32Sjsg 	int ret, i;
12171bb76ff1Sjsg 
12181bb76ff1Sjsg 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12191bb76ff1Sjsg 
12201bb76ff1Sjsg 	ret = i915_pcode_init(dev_priv);
12211bb76ff1Sjsg 	if (ret)
12221bb76ff1Sjsg 		return ret;
12231bb76ff1Sjsg 
12241bb76ff1Sjsg 	sanitize_gpu(dev_priv);
12251bb76ff1Sjsg 
12261bb76ff1Sjsg 	ret = i915_ggtt_enable_hw(dev_priv);
12271bb76ff1Sjsg 	if (ret)
12281bb76ff1Sjsg 		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
12291bb76ff1Sjsg 
12301bb76ff1Sjsg 	i915_ggtt_resume(to_gt(dev_priv)->ggtt);
1231f005ef32Sjsg 
1232f005ef32Sjsg 	for_each_gt(gt, dev_priv, i)
1233f005ef32Sjsg 		if (GRAPHICS_VER(gt->i915) >= 8)
1234f005ef32Sjsg 			setup_private_pat(gt);
1235f005ef32Sjsg 
12361bb76ff1Sjsg 	/* Must be called after GGTT is resumed. */
12371bb76ff1Sjsg 	intel_dpt_resume(dev_priv);
12381bb76ff1Sjsg 
1239f005ef32Sjsg 	intel_dmc_resume(dev_priv);
12401bb76ff1Sjsg 
12411bb76ff1Sjsg 	i915_restore_display(dev_priv);
12421bb76ff1Sjsg 	intel_pps_unlock_regs_wa(dev_priv);
12431bb76ff1Sjsg 
12441bb76ff1Sjsg 	intel_init_pch_refclk(dev_priv);
12451bb76ff1Sjsg 
12461bb76ff1Sjsg 	/*
12471bb76ff1Sjsg 	 * Interrupts have to be enabled before any batches are run. If not the
12481bb76ff1Sjsg 	 * GPU will hang. i915_gem_init_hw() will initiate batches to
12491bb76ff1Sjsg 	 * update/restore the context.
12501bb76ff1Sjsg 	 *
12511bb76ff1Sjsg 	 * drm_mode_config_reset() needs AUX interrupts.
12521bb76ff1Sjsg 	 *
1253f005ef32Sjsg 	 * Modeset enabling in intel_display_driver_init_hw() also needs working
12541bb76ff1Sjsg 	 * interrupts.
12551bb76ff1Sjsg 	 */
12561bb76ff1Sjsg 	intel_runtime_pm_enable_interrupts(dev_priv);
12571bb76ff1Sjsg 
12581bb76ff1Sjsg 	if (HAS_DISPLAY(dev_priv))
12591bb76ff1Sjsg 		drm_mode_config_reset(dev);
12601bb76ff1Sjsg 
12611bb76ff1Sjsg 	i915_gem_resume(dev_priv);
12621bb76ff1Sjsg 
1263f005ef32Sjsg 	intel_display_driver_init_hw(dev_priv);
1264f005ef32Sjsg 
1265f005ef32Sjsg 	intel_clock_gating_init(dev_priv);
12661bb76ff1Sjsg 	intel_hpd_init(dev_priv);
12671bb76ff1Sjsg 
12681bb76ff1Sjsg 	/* MST sideband requires HPD interrupts enabled */
12691bb76ff1Sjsg 	intel_dp_mst_resume(dev_priv);
1270f005ef32Sjsg 	intel_display_driver_resume(dev_priv);
12711bb76ff1Sjsg 
12721bb76ff1Sjsg 	intel_hpd_poll_disable(dev_priv);
12731bb76ff1Sjsg 	if (HAS_DISPLAY(dev_priv))
12741bb76ff1Sjsg 		drm_kms_helper_poll_enable(dev);
12751bb76ff1Sjsg 
12761bb76ff1Sjsg 	intel_opregion_resume(dev_priv);
12771bb76ff1Sjsg 
12781bb76ff1Sjsg 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
12791bb76ff1Sjsg 
12801bb76ff1Sjsg 	intel_power_domains_enable(dev_priv);
12811bb76ff1Sjsg 
12821bb76ff1Sjsg 	intel_gvt_resume(dev_priv);
12831bb76ff1Sjsg 
12841bb76ff1Sjsg 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12851bb76ff1Sjsg 
12861bb76ff1Sjsg 	return 0;
12871bb76ff1Sjsg }
12881bb76ff1Sjsg 
12891bb76ff1Sjsg static int i915_drm_resume_early(struct drm_device *dev)
12901bb76ff1Sjsg {
12911bb76ff1Sjsg 	struct drm_i915_private *dev_priv = to_i915(dev);
12921bb76ff1Sjsg 	struct pci_dev *pdev = dev_priv->drm.pdev;
12931bb76ff1Sjsg 	struct intel_gt *gt;
12941bb76ff1Sjsg 	int ret, i;
12951bb76ff1Sjsg 
12961bb76ff1Sjsg 	/*
12971bb76ff1Sjsg 	 * We have a resume ordering issue with the snd-hda driver also
12981bb76ff1Sjsg 	 * requiring our device to be power up. Due to the lack of a
12991bb76ff1Sjsg 	 * parent/child relationship we currently solve this with an early
13001bb76ff1Sjsg 	 * resume hook.
13011bb76ff1Sjsg 	 *
13021bb76ff1Sjsg 	 * FIXME: This should be solved with a special hdmi sink device or
13031bb76ff1Sjsg 	 * similar so that power domains can be employed.
13041bb76ff1Sjsg 	 */
13051bb76ff1Sjsg 
13061bb76ff1Sjsg 	/*
13071bb76ff1Sjsg 	 * Note that we need to set the power state explicitly, since we
13081bb76ff1Sjsg 	 * powered off the device during freeze and the PCI core won't power
13091bb76ff1Sjsg 	 * it back up for us during thaw. Powering off the device during
13101bb76ff1Sjsg 	 * freeze is not a hard requirement though, and during the
13111bb76ff1Sjsg 	 * suspend/resume phases the PCI core makes sure we get here with the
13121bb76ff1Sjsg 	 * device powered on. So in case we change our freeze logic and keep
13131bb76ff1Sjsg 	 * the device powered we can also remove the following set power state
13141bb76ff1Sjsg 	 * call.
13151bb76ff1Sjsg 	 */
13161bb76ff1Sjsg 	ret = pci_set_power_state(pdev, PCI_D0);
13171bb76ff1Sjsg 	if (ret) {
13181bb76ff1Sjsg 		drm_err(&dev_priv->drm,
13191bb76ff1Sjsg 			"failed to set PCI D0 power state (%d)\n", ret);
13201bb76ff1Sjsg 		return ret;
13211bb76ff1Sjsg 	}
13221bb76ff1Sjsg 
13231bb76ff1Sjsg 	/*
13241bb76ff1Sjsg 	 * Note that pci_enable_device() first enables any parent bridge
13251bb76ff1Sjsg 	 * device and only then sets the power state for this device. The
13261bb76ff1Sjsg 	 * bridge enabling is a nop though, since bridge devices are resumed
13271bb76ff1Sjsg 	 * first. The order of enabling power and enabling the device is
13281bb76ff1Sjsg 	 * imposed by the PCI core as described above, so here we preserve the
13291bb76ff1Sjsg 	 * same order for the freeze/thaw phases.
13301bb76ff1Sjsg 	 *
13311bb76ff1Sjsg 	 * TODO: eventually we should remove pci_disable_device() /
13321bb76ff1Sjsg 	 * pci_enable_enable_device() from suspend/resume. Due to how they
13331bb76ff1Sjsg 	 * depend on the device enable refcount we can't anyway depend on them
13341bb76ff1Sjsg 	 * disabling/enabling the device.
13351bb76ff1Sjsg 	 */
13361bb76ff1Sjsg 	if (pci_enable_device(pdev))
13371bb76ff1Sjsg 		return -EIO;
13381bb76ff1Sjsg 
13391bb76ff1Sjsg 	pci_set_master(pdev);
13401bb76ff1Sjsg 
13411bb76ff1Sjsg 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
13421bb76ff1Sjsg 
13431bb76ff1Sjsg 	ret = vlv_resume_prepare(dev_priv, false);
13441bb76ff1Sjsg 	if (ret)
13451bb76ff1Sjsg 		drm_err(&dev_priv->drm,
13461bb76ff1Sjsg 			"Resume prepare failed: %d, continuing anyway\n", ret);
13471bb76ff1Sjsg 
13481bb76ff1Sjsg 	for_each_gt(gt, dev_priv, i) {
13491bb76ff1Sjsg 		intel_uncore_resume_early(gt->uncore);
13501bb76ff1Sjsg 		intel_gt_check_and_clear_faults(gt);
13511bb76ff1Sjsg 	}
13521bb76ff1Sjsg 
13531bb76ff1Sjsg 	intel_display_power_resume_early(dev_priv);
13541bb76ff1Sjsg 
13551bb76ff1Sjsg 	intel_power_domains_resume(dev_priv);
13561bb76ff1Sjsg 
13571bb76ff1Sjsg 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
13581bb76ff1Sjsg 
13591bb76ff1Sjsg 	return ret;
13601bb76ff1Sjsg }
13611bb76ff1Sjsg 
13621bb76ff1Sjsg int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
13631bb76ff1Sjsg {
13641bb76ff1Sjsg 	int ret;
13651bb76ff1Sjsg 
13661bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
13671bb76ff1Sjsg 		return 0;
13681bb76ff1Sjsg 
13691bb76ff1Sjsg 	ret = i915_drm_resume_early(&i915->drm);
13701bb76ff1Sjsg 	if (ret)
13711bb76ff1Sjsg 		return ret;
13721bb76ff1Sjsg 
13731bb76ff1Sjsg 	return i915_drm_resume(&i915->drm);
13741bb76ff1Sjsg }
13751bb76ff1Sjsg 
13761bb76ff1Sjsg #ifdef __linux__
13771bb76ff1Sjsg 
13781bb76ff1Sjsg static int i915_pm_prepare(struct device *kdev)
13791bb76ff1Sjsg {
13801bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
13811bb76ff1Sjsg 
13821bb76ff1Sjsg 	if (!i915) {
13831bb76ff1Sjsg 		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
13841bb76ff1Sjsg 		return -ENODEV;
13851bb76ff1Sjsg 	}
13861bb76ff1Sjsg 
13871bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
13881bb76ff1Sjsg 		return 0;
13891bb76ff1Sjsg 
13901bb76ff1Sjsg 	return i915_drm_prepare(&i915->drm);
13911bb76ff1Sjsg }
13921bb76ff1Sjsg 
13931bb76ff1Sjsg static int i915_pm_suspend(struct device *kdev)
13941bb76ff1Sjsg {
13951bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
13961bb76ff1Sjsg 
13971bb76ff1Sjsg 	if (!i915) {
13981bb76ff1Sjsg 		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
13991bb76ff1Sjsg 		return -ENODEV;
14001bb76ff1Sjsg 	}
14011bb76ff1Sjsg 
14021bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
14031bb76ff1Sjsg 		return 0;
14041bb76ff1Sjsg 
14051bb76ff1Sjsg 	return i915_drm_suspend(&i915->drm);
14061bb76ff1Sjsg }
14071bb76ff1Sjsg 
14081bb76ff1Sjsg static int i915_pm_suspend_late(struct device *kdev)
14091bb76ff1Sjsg {
14101bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
14111bb76ff1Sjsg 
14121bb76ff1Sjsg 	/*
14131bb76ff1Sjsg 	 * We have a suspend ordering issue with the snd-hda driver also
14141bb76ff1Sjsg 	 * requiring our device to be power up. Due to the lack of a
14151bb76ff1Sjsg 	 * parent/child relationship we currently solve this with an late
14161bb76ff1Sjsg 	 * suspend hook.
14171bb76ff1Sjsg 	 *
14181bb76ff1Sjsg 	 * FIXME: This should be solved with a special hdmi sink device or
14191bb76ff1Sjsg 	 * similar so that power domains can be employed.
14201bb76ff1Sjsg 	 */
14211bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
14221bb76ff1Sjsg 		return 0;
14231bb76ff1Sjsg 
14241bb76ff1Sjsg 	return i915_drm_suspend_late(&i915->drm, false);
14251bb76ff1Sjsg }
14261bb76ff1Sjsg 
14271bb76ff1Sjsg static int i915_pm_poweroff_late(struct device *kdev)
14281bb76ff1Sjsg {
14291bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
14301bb76ff1Sjsg 
14311bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
14321bb76ff1Sjsg 		return 0;
14331bb76ff1Sjsg 
14341bb76ff1Sjsg 	return i915_drm_suspend_late(&i915->drm, true);
14351bb76ff1Sjsg }
14361bb76ff1Sjsg 
14371bb76ff1Sjsg static int i915_pm_resume_early(struct device *kdev)
14381bb76ff1Sjsg {
14391bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
14401bb76ff1Sjsg 
14411bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
14421bb76ff1Sjsg 		return 0;
14431bb76ff1Sjsg 
14441bb76ff1Sjsg 	return i915_drm_resume_early(&i915->drm);
14451bb76ff1Sjsg }
14461bb76ff1Sjsg 
14471bb76ff1Sjsg static int i915_pm_resume(struct device *kdev)
14481bb76ff1Sjsg {
14491bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
14501bb76ff1Sjsg 
14511bb76ff1Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
14521bb76ff1Sjsg 		return 0;
14531bb76ff1Sjsg 
14541bb76ff1Sjsg 	return i915_drm_resume(&i915->drm);
14551bb76ff1Sjsg }
14561bb76ff1Sjsg 
1457f005ef32Sjsg static void i915_pm_complete(struct device *kdev)
1458f005ef32Sjsg {
1459f005ef32Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
1460f005ef32Sjsg 
1461f005ef32Sjsg 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1462f005ef32Sjsg 		return;
1463f005ef32Sjsg 
1464f005ef32Sjsg 	i915_drm_complete(&i915->drm);
1465f005ef32Sjsg }
1466f005ef32Sjsg 
14671bb76ff1Sjsg /* freeze: before creating the hibernation_image */
14681bb76ff1Sjsg static int i915_pm_freeze(struct device *kdev)
14691bb76ff1Sjsg {
14701bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
14711bb76ff1Sjsg 	int ret;
14721bb76ff1Sjsg 
14731bb76ff1Sjsg 	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
14741bb76ff1Sjsg 		ret = i915_drm_suspend(&i915->drm);
14751bb76ff1Sjsg 		if (ret)
14761bb76ff1Sjsg 			return ret;
14771bb76ff1Sjsg 	}
14781bb76ff1Sjsg 
14791bb76ff1Sjsg 	ret = i915_gem_freeze(i915);
14801bb76ff1Sjsg 	if (ret)
14811bb76ff1Sjsg 		return ret;
14821bb76ff1Sjsg 
14831bb76ff1Sjsg 	return 0;
14841bb76ff1Sjsg }
14851bb76ff1Sjsg 
14861bb76ff1Sjsg static int i915_pm_freeze_late(struct device *kdev)
14871bb76ff1Sjsg {
14881bb76ff1Sjsg 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
14891bb76ff1Sjsg 	int ret;
14901bb76ff1Sjsg 
14911bb76ff1Sjsg 	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
14921bb76ff1Sjsg 		ret = i915_drm_suspend_late(&i915->drm, true);
14931bb76ff1Sjsg 		if (ret)
14941bb76ff1Sjsg 			return ret;
14951bb76ff1Sjsg 	}
14961bb76ff1Sjsg 
14971bb76ff1Sjsg 	ret = i915_gem_freeze_late(i915);
14981bb76ff1Sjsg 	if (ret)
14991bb76ff1Sjsg 		return ret;
15001bb76ff1Sjsg 
15011bb76ff1Sjsg 	return 0;
15021bb76ff1Sjsg }
15031bb76ff1Sjsg 
15041bb76ff1Sjsg /* thaw: called after creating the hibernation image, but before turning off. */
15051bb76ff1Sjsg static int i915_pm_thaw_early(struct device *kdev)
15061bb76ff1Sjsg {
15071bb76ff1Sjsg 	return i915_pm_resume_early(kdev);
15081bb76ff1Sjsg }
15091bb76ff1Sjsg 
15101bb76ff1Sjsg static int i915_pm_thaw(struct device *kdev)
15111bb76ff1Sjsg {
15121bb76ff1Sjsg 	return i915_pm_resume(kdev);
15131bb76ff1Sjsg }
15141bb76ff1Sjsg 
15151bb76ff1Sjsg /* restore: called after loading the hibernation image. */
15161bb76ff1Sjsg static int i915_pm_restore_early(struct device *kdev)
15171bb76ff1Sjsg {
15181bb76ff1Sjsg 	return i915_pm_resume_early(kdev);
15191bb76ff1Sjsg }
15201bb76ff1Sjsg 
15211bb76ff1Sjsg static int i915_pm_restore(struct device *kdev)
15221bb76ff1Sjsg {
15231bb76ff1Sjsg 	return i915_pm_resume(kdev);
15241bb76ff1Sjsg }
15251bb76ff1Sjsg 
15261bb76ff1Sjsg static int intel_runtime_suspend(struct device *kdev)
15271bb76ff1Sjsg {
15281bb76ff1Sjsg 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
15291bb76ff1Sjsg 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
153006842942Sjsg 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
153106842942Sjsg 	struct pci_dev *root_pdev;
15321bb76ff1Sjsg 	struct intel_gt *gt;
15331bb76ff1Sjsg 	int ret, i;
15341bb76ff1Sjsg 
15351bb76ff1Sjsg 	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
15361bb76ff1Sjsg 		return -ENODEV;
15371bb76ff1Sjsg 
15381bb76ff1Sjsg 	drm_dbg(&dev_priv->drm, "Suspending device\n");
15391bb76ff1Sjsg 
15401bb76ff1Sjsg 	disable_rpm_wakeref_asserts(rpm);
15411bb76ff1Sjsg 
15421bb76ff1Sjsg 	/*
15431bb76ff1Sjsg 	 * We are safe here against re-faults, since the fault handler takes
15441bb76ff1Sjsg 	 * an RPM reference.
15451bb76ff1Sjsg 	 */
15461bb76ff1Sjsg 	i915_gem_runtime_suspend(dev_priv);
15471bb76ff1Sjsg 
1548f005ef32Sjsg 	intel_pxp_runtime_suspend(dev_priv->pxp);
1549f005ef32Sjsg 
15501bb76ff1Sjsg 	for_each_gt(gt, dev_priv, i)
15511bb76ff1Sjsg 		intel_gt_runtime_suspend(gt);
15521bb76ff1Sjsg 
15531bb76ff1Sjsg 	intel_runtime_pm_disable_interrupts(dev_priv);
15541bb76ff1Sjsg 
15551bb76ff1Sjsg 	for_each_gt(gt, dev_priv, i)
15561bb76ff1Sjsg 		intel_uncore_suspend(gt->uncore);
15571bb76ff1Sjsg 
15581bb76ff1Sjsg 	intel_display_power_suspend(dev_priv);
15591bb76ff1Sjsg 
15601bb76ff1Sjsg 	ret = vlv_suspend_complete(dev_priv);
15611bb76ff1Sjsg 	if (ret) {
15621bb76ff1Sjsg 		drm_err(&dev_priv->drm,
15631bb76ff1Sjsg 			"Runtime suspend failed, disabling it (%d)\n", ret);
15641bb76ff1Sjsg 		intel_uncore_runtime_resume(&dev_priv->uncore);
15651bb76ff1Sjsg 
15661bb76ff1Sjsg 		intel_runtime_pm_enable_interrupts(dev_priv);
15671bb76ff1Sjsg 
15681bb76ff1Sjsg 		for_each_gt(gt, dev_priv, i)
15691bb76ff1Sjsg 			intel_gt_runtime_resume(gt);
15701bb76ff1Sjsg 
15711bb76ff1Sjsg 		enable_rpm_wakeref_asserts(rpm);
15721bb76ff1Sjsg 
15731bb76ff1Sjsg 		return ret;
15741bb76ff1Sjsg 	}
15751bb76ff1Sjsg 
15761bb76ff1Sjsg 	enable_rpm_wakeref_asserts(rpm);
15771bb76ff1Sjsg 	intel_runtime_pm_driver_release(rpm);
15781bb76ff1Sjsg 
15791bb76ff1Sjsg 	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
15801bb76ff1Sjsg 		drm_err(&dev_priv->drm,
15811bb76ff1Sjsg 			"Unclaimed access detected prior to suspending\n");
15821bb76ff1Sjsg 
158306842942Sjsg 	/*
158406842942Sjsg 	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
158506842942Sjsg 	 * This should be totally removed when we handle the pci states properly
158606842942Sjsg 	 * on runtime PM.
158706842942Sjsg 	 */
158806842942Sjsg 	root_pdev = pcie_find_root_port(pdev);
158906842942Sjsg 	if (root_pdev)
159006842942Sjsg 		pci_d3cold_disable(root_pdev);
159106842942Sjsg 
15921bb76ff1Sjsg 	rpm->suspended = true;
15931bb76ff1Sjsg 
15941bb76ff1Sjsg 	/*
15951bb76ff1Sjsg 	 * FIXME: We really should find a document that references the arguments
15961bb76ff1Sjsg 	 * used below!
15971bb76ff1Sjsg 	 */
15981bb76ff1Sjsg 	if (IS_BROADWELL(dev_priv)) {
15991bb76ff1Sjsg 		/*
16001bb76ff1Sjsg 		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
16011bb76ff1Sjsg 		 * being detected, and the call we do at intel_runtime_resume()
16021bb76ff1Sjsg 		 * won't be able to restore them. Since PCI_D3hot matches the
16031bb76ff1Sjsg 		 * actual specification and appears to be working, use it.
16041bb76ff1Sjsg 		 */
16051bb76ff1Sjsg 		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
16061bb76ff1Sjsg 	} else {
16071bb76ff1Sjsg 		/*
16081bb76ff1Sjsg 		 * current versions of firmware which depend on this opregion
16091bb76ff1Sjsg 		 * notification have repurposed the D1 definition to mean
16101bb76ff1Sjsg 		 * "runtime suspended" vs. what you would normally expect (D3)
16111bb76ff1Sjsg 		 * to distinguish it from notifications that might be sent via
16121bb76ff1Sjsg 		 * the suspend path.
16131bb76ff1Sjsg 		 */
16141bb76ff1Sjsg 		intel_opregion_notify_adapter(dev_priv, PCI_D1);
16151bb76ff1Sjsg 	}
16161bb76ff1Sjsg 
16171bb76ff1Sjsg 	assert_forcewakes_inactive(&dev_priv->uncore);
16181bb76ff1Sjsg 
16191bb76ff1Sjsg 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
16201bb76ff1Sjsg 		intel_hpd_poll_enable(dev_priv);
16211bb76ff1Sjsg 
16221bb76ff1Sjsg 	drm_dbg(&dev_priv->drm, "Device suspended\n");
16231bb76ff1Sjsg 	return 0;
16241bb76ff1Sjsg }
16251bb76ff1Sjsg 
16261bb76ff1Sjsg static int intel_runtime_resume(struct device *kdev)
16271bb76ff1Sjsg {
16281bb76ff1Sjsg 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
16291bb76ff1Sjsg 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
163006842942Sjsg 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
163106842942Sjsg 	struct pci_dev *root_pdev;
16321bb76ff1Sjsg 	struct intel_gt *gt;
16331bb76ff1Sjsg 	int ret, i;
16341bb76ff1Sjsg 
16351bb76ff1Sjsg 	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
16361bb76ff1Sjsg 		return -ENODEV;
16371bb76ff1Sjsg 
16381bb76ff1Sjsg 	drm_dbg(&dev_priv->drm, "Resuming device\n");
16391bb76ff1Sjsg 
16401bb76ff1Sjsg 	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
16411bb76ff1Sjsg 	disable_rpm_wakeref_asserts(rpm);
16421bb76ff1Sjsg 
16431bb76ff1Sjsg 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
16441bb76ff1Sjsg 	rpm->suspended = false;
164506842942Sjsg 
164606842942Sjsg 	root_pdev = pcie_find_root_port(pdev);
164706842942Sjsg 	if (root_pdev)
164806842942Sjsg 		pci_d3cold_enable(root_pdev);
164906842942Sjsg 
16501bb76ff1Sjsg 	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
16511bb76ff1Sjsg 		drm_dbg(&dev_priv->drm,
16521bb76ff1Sjsg 			"Unclaimed access during suspend, bios?\n");
16531bb76ff1Sjsg 
16541bb76ff1Sjsg 	intel_display_power_resume(dev_priv);
16551bb76ff1Sjsg 
16561bb76ff1Sjsg 	ret = vlv_resume_prepare(dev_priv, true);
16571bb76ff1Sjsg 
16581bb76ff1Sjsg 	for_each_gt(gt, dev_priv, i)
16591bb76ff1Sjsg 		intel_uncore_runtime_resume(gt->uncore);
16601bb76ff1Sjsg 
16611bb76ff1Sjsg 	intel_runtime_pm_enable_interrupts(dev_priv);
16621bb76ff1Sjsg 
16631bb76ff1Sjsg 	/*
16641bb76ff1Sjsg 	 * No point of rolling back things in case of an error, as the best
16651bb76ff1Sjsg 	 * we can do is to hope that things will still work (and disable RPM).
16661bb76ff1Sjsg 	 */
16671bb76ff1Sjsg 	for_each_gt(gt, dev_priv, i)
16681bb76ff1Sjsg 		intel_gt_runtime_resume(gt);
16691bb76ff1Sjsg 
1670f005ef32Sjsg 	intel_pxp_runtime_resume(dev_priv->pxp);
1671f005ef32Sjsg 
16721bb76ff1Sjsg 	/*
16731bb76ff1Sjsg 	 * On VLV/CHV display interrupts are part of the display
16741bb76ff1Sjsg 	 * power well, so hpd is reinitialized from there. For
16751bb76ff1Sjsg 	 * everyone else do it here.
16761bb76ff1Sjsg 	 */
16771bb76ff1Sjsg 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
16781bb76ff1Sjsg 		intel_hpd_init(dev_priv);
16791bb76ff1Sjsg 		intel_hpd_poll_disable(dev_priv);
16801bb76ff1Sjsg 	}
16811bb76ff1Sjsg 
16821bb76ff1Sjsg 	skl_watermark_ipc_update(dev_priv);
16831bb76ff1Sjsg 
16841bb76ff1Sjsg 	enable_rpm_wakeref_asserts(rpm);
16851bb76ff1Sjsg 
16861bb76ff1Sjsg 	if (ret)
16871bb76ff1Sjsg 		drm_err(&dev_priv->drm,
16881bb76ff1Sjsg 			"Runtime resume failed, disabling it (%d)\n", ret);
16891bb76ff1Sjsg 	else
16901bb76ff1Sjsg 		drm_dbg(&dev_priv->drm, "Device resumed\n");
16911bb76ff1Sjsg 
16921bb76ff1Sjsg 	return ret;
16931bb76ff1Sjsg }
16941bb76ff1Sjsg 
16951bb76ff1Sjsg const struct dev_pm_ops i915_pm_ops = {
16961bb76ff1Sjsg 	/*
16971bb76ff1Sjsg 	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
16981bb76ff1Sjsg 	 * PMSG_RESUME]
16991bb76ff1Sjsg 	 */
17001bb76ff1Sjsg 	.prepare = i915_pm_prepare,
17011bb76ff1Sjsg 	.suspend = i915_pm_suspend,
17021bb76ff1Sjsg 	.suspend_late = i915_pm_suspend_late,
17031bb76ff1Sjsg 	.resume_early = i915_pm_resume_early,
17041bb76ff1Sjsg 	.resume = i915_pm_resume,
1705f005ef32Sjsg 	.complete = i915_pm_complete,
17061bb76ff1Sjsg 
17071bb76ff1Sjsg 	/*
17081bb76ff1Sjsg 	 * S4 event handlers
17091bb76ff1Sjsg 	 * @freeze, @freeze_late    : called (1) before creating the
17101bb76ff1Sjsg 	 *                            hibernation image [PMSG_FREEZE] and
17111bb76ff1Sjsg 	 *                            (2) after rebooting, before restoring
17121bb76ff1Sjsg 	 *                            the image [PMSG_QUIESCE]
17131bb76ff1Sjsg 	 * @thaw, @thaw_early       : called (1) after creating the hibernation
17141bb76ff1Sjsg 	 *                            image, before writing it [PMSG_THAW]
17151bb76ff1Sjsg 	 *                            and (2) after failing to create or
17161bb76ff1Sjsg 	 *                            restore the image [PMSG_RECOVER]
17171bb76ff1Sjsg 	 * @poweroff, @poweroff_late: called after writing the hibernation
17181bb76ff1Sjsg 	 *                            image, before rebooting [PMSG_HIBERNATE]
17191bb76ff1Sjsg 	 * @restore, @restore_early : called after rebooting and restoring the
17201bb76ff1Sjsg 	 *                            hibernation image [PMSG_RESTORE]
17211bb76ff1Sjsg 	 */
17221bb76ff1Sjsg 	.freeze = i915_pm_freeze,
17231bb76ff1Sjsg 	.freeze_late = i915_pm_freeze_late,
17241bb76ff1Sjsg 	.thaw_early = i915_pm_thaw_early,
17251bb76ff1Sjsg 	.thaw = i915_pm_thaw,
17261bb76ff1Sjsg 	.poweroff = i915_pm_suspend,
17271bb76ff1Sjsg 	.poweroff_late = i915_pm_poweroff_late,
17281bb76ff1Sjsg 	.restore_early = i915_pm_restore_early,
17291bb76ff1Sjsg 	.restore = i915_pm_restore,
17301bb76ff1Sjsg 
17311bb76ff1Sjsg 	/* S0ix (via runtime suspend) event handlers */
17321bb76ff1Sjsg 	.runtime_suspend = intel_runtime_suspend,
17331bb76ff1Sjsg 	.runtime_resume = intel_runtime_resume,
17341bb76ff1Sjsg };
17351bb76ff1Sjsg 
17361bb76ff1Sjsg static const struct file_operations i915_driver_fops = {
17371bb76ff1Sjsg 	.owner = THIS_MODULE,
17381bb76ff1Sjsg 	.open = drm_open,
17391bb76ff1Sjsg 	.release = drm_release_noglobal,
17401bb76ff1Sjsg 	.unlocked_ioctl = drm_ioctl,
17411bb76ff1Sjsg 	.mmap = i915_gem_mmap,
17421bb76ff1Sjsg 	.poll = drm_poll,
17431bb76ff1Sjsg 	.read = drm_read,
17441bb76ff1Sjsg 	.compat_ioctl = i915_ioc32_compat_ioctl,
17451bb76ff1Sjsg 	.llseek = noop_llseek,
17461bb76ff1Sjsg #ifdef CONFIG_PROC_FS
1747f005ef32Sjsg 	.show_fdinfo = drm_show_fdinfo,
17481bb76ff1Sjsg #endif
17491bb76ff1Sjsg };
17501bb76ff1Sjsg 
17511bb76ff1Sjsg #endif /* __linux__ */
17521bb76ff1Sjsg 
17531bb76ff1Sjsg static int
17541bb76ff1Sjsg i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
17551bb76ff1Sjsg 			  struct drm_file *file)
17561bb76ff1Sjsg {
17571bb76ff1Sjsg 	return -ENODEV;
17581bb76ff1Sjsg }
17591bb76ff1Sjsg 
17601bb76ff1Sjsg static const struct drm_ioctl_desc i915_ioctls[] = {
17611bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17621bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
17631bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
17641bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
17651bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
17661bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
17671bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
17681bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17691bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
17701bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
17711bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17721bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
17731bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17741bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17751bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
17761bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
17771bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17781bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17791bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
17801bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
17811bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
17821bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
17831bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
17841bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
17851bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
17861bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
17871bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17881bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
17891bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
17901bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
17911bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
17921bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
17931bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
17941bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
17951bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
17961bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
17971bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
17981bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
17991bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
18001bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
18011bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
18021bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
18031bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
18041bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
18051bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
18061bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
18071bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
18081bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
18091bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
18101bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
18111bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
18121bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
18131bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
18141bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
18151bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
18161bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
18171bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
18181bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
18191bb76ff1Sjsg 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
18201bb76ff1Sjsg };
18211bb76ff1Sjsg 
18221bb76ff1Sjsg /*
18231bb76ff1Sjsg  * Interface history:
18241bb76ff1Sjsg  *
18251bb76ff1Sjsg  * 1.1: Original.
18261bb76ff1Sjsg  * 1.2: Add Power Management
18271bb76ff1Sjsg  * 1.3: Add vblank support
18281bb76ff1Sjsg  * 1.4: Fix cmdbuffer path, add heap destroy
18291bb76ff1Sjsg  * 1.5: Add vblank pipe configuration
18301bb76ff1Sjsg  * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
18311bb76ff1Sjsg  *      - Support vertical blank on secondary display pipe
18321bb76ff1Sjsg  */
18331bb76ff1Sjsg #define DRIVER_MAJOR		1
18341bb76ff1Sjsg #define DRIVER_MINOR		6
18351bb76ff1Sjsg #define DRIVER_PATCHLEVEL	0
18361bb76ff1Sjsg 
18371bb76ff1Sjsg static const struct drm_driver i915_drm_driver = {
18381bb76ff1Sjsg 	/* Don't use MTRRs here; the Xserver or userspace app should
18391bb76ff1Sjsg 	 * deal with them for Intel hardware.
18401bb76ff1Sjsg 	 */
18411bb76ff1Sjsg 	.driver_features =
18421bb76ff1Sjsg 	    DRIVER_GEM |
18431bb76ff1Sjsg 	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
18441bb76ff1Sjsg 	    DRIVER_SYNCOBJ_TIMELINE,
18451bb76ff1Sjsg 	.release = i915_driver_release,
18461bb76ff1Sjsg 	.open = i915_driver_open,
18471bb76ff1Sjsg 	.lastclose = i915_driver_lastclose,
18481bb76ff1Sjsg 	.postclose = i915_driver_postclose,
1849f005ef32Sjsg 	.show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
18501bb76ff1Sjsg 
18511bb76ff1Sjsg 	.gem_prime_import = i915_gem_prime_import,
18521bb76ff1Sjsg 
18531bb76ff1Sjsg 	.dumb_create = i915_gem_dumb_create,
18541bb76ff1Sjsg 	.dumb_map_offset = i915_gem_dumb_mmap_offset,
18551bb76ff1Sjsg 
18561bb76ff1Sjsg #ifdef __OpenBSD__
18571bb76ff1Sjsg 	.mmap = i915_gem_mmap,
18581bb76ff1Sjsg 	.gem_fault = i915_gem_fault,
18591bb76ff1Sjsg #endif
18601bb76ff1Sjsg 
18611bb76ff1Sjsg 	.ioctls = i915_ioctls,
18621bb76ff1Sjsg 	.num_ioctls = ARRAY_SIZE(i915_ioctls),
18631bb76ff1Sjsg #ifdef __linux__
18641bb76ff1Sjsg 	.fops = &i915_driver_fops,
18651bb76ff1Sjsg #endif
18661bb76ff1Sjsg 	.name = DRIVER_NAME,
18671bb76ff1Sjsg 	.desc = DRIVER_DESC,
18681bb76ff1Sjsg 	.date = DRIVER_DATE,
18691bb76ff1Sjsg 	.major = DRIVER_MAJOR,
18701bb76ff1Sjsg 	.minor = DRIVER_MINOR,
18711bb76ff1Sjsg 	.patchlevel = DRIVER_PATCHLEVEL,
18721bb76ff1Sjsg };
18731bb76ff1Sjsg 
18741bb76ff1Sjsg #ifdef __OpenBSD__
18751bb76ff1Sjsg 
18761bb76ff1Sjsg #include <drm/drm_legacy.h> /* for agp */
18771bb76ff1Sjsg #include <drm/drm_utils.h>
18781bb76ff1Sjsg #include <drm/drm_fb_helper.h>
18791bb76ff1Sjsg 
18801bb76ff1Sjsg #ifdef __amd64__
18811bb76ff1Sjsg #include "efifb.h"
18821bb76ff1Sjsg #include <machine/biosvar.h>
18831bb76ff1Sjsg #endif
18841bb76ff1Sjsg 
18851bb76ff1Sjsg #if NEFIFB > 0
18861bb76ff1Sjsg #include <machine/efifbvar.h>
18871bb76ff1Sjsg #endif
18881bb76ff1Sjsg 
18891bb76ff1Sjsg #include "intagp.h"
18901bb76ff1Sjsg 
18911bb76ff1Sjsg /*
18921bb76ff1Sjsg  * some functions are only called once on init regardless of how many times
18931bb76ff1Sjsg  * inteldrm attaches in linux this is handled via module_init()/module_exit()
18941bb76ff1Sjsg  */
18951bb76ff1Sjsg int inteldrm_refcnt;
18961bb76ff1Sjsg 
18971bb76ff1Sjsg #if NINTAGP > 0
18981bb76ff1Sjsg int	intagpsubmatch(struct device *, void *, void *);
18991bb76ff1Sjsg int	intagp_print(void *, const char *);
19001bb76ff1Sjsg 
19011bb76ff1Sjsg int
19021bb76ff1Sjsg intagpsubmatch(struct device *parent, void *match, void *aux)
19031bb76ff1Sjsg {
19041bb76ff1Sjsg 	extern struct cfdriver intagp_cd;
19051bb76ff1Sjsg 	struct cfdata *cf = match;
19061bb76ff1Sjsg 
19071bb76ff1Sjsg 	/* only allow intagp to attach */
19081bb76ff1Sjsg 	if (cf->cf_driver == &intagp_cd)
19091bb76ff1Sjsg 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
19101bb76ff1Sjsg 	return (0);
19111bb76ff1Sjsg }
19121bb76ff1Sjsg 
19131bb76ff1Sjsg int
19141bb76ff1Sjsg intagp_print(void *vaa, const char *pnp)
19151bb76ff1Sjsg {
19161bb76ff1Sjsg 	if (pnp)
19171bb76ff1Sjsg 		printf("intagp at %s", pnp);
19181bb76ff1Sjsg 	return (UNCONF);
19191bb76ff1Sjsg }
19201bb76ff1Sjsg #endif
19211bb76ff1Sjsg 
19221bb76ff1Sjsg int	inteldrm_wsioctl(void *, u_long, caddr_t, int, struct proc *);
19231bb76ff1Sjsg paddr_t	inteldrm_wsmmap(void *, off_t, int);
19241bb76ff1Sjsg int	inteldrm_alloc_screen(void *, const struct wsscreen_descr *,
19251bb76ff1Sjsg 	    void **, int *, int *, uint32_t *);
19261bb76ff1Sjsg void	inteldrm_free_screen(void *, void *);
19271bb76ff1Sjsg int	inteldrm_show_screen(void *, void *, int,
19281bb76ff1Sjsg 	    void (*)(void *, int, int), void *);
19291bb76ff1Sjsg void	inteldrm_doswitch(void *);
19301bb76ff1Sjsg void	inteldrm_enter_ddb(void *, void *);
19311bb76ff1Sjsg int	inteldrm_load_font(void *, void *, struct wsdisplay_font *);
19321bb76ff1Sjsg int	inteldrm_list_font(void *, struct wsdisplay_font *);
19331bb76ff1Sjsg int	inteldrm_getchar(void *, int, int, struct wsdisplay_charcell *);
19341bb76ff1Sjsg void	inteldrm_burner(void *, u_int, u_int);
19351bb76ff1Sjsg void	inteldrm_burner_cb(void *);
19361bb76ff1Sjsg void	inteldrm_scrollback(void *, void *, int lines);
19371bb76ff1Sjsg extern const struct pci_device_id pciidlist[];
19381bb76ff1Sjsg 
19391bb76ff1Sjsg struct wsscreen_descr inteldrm_stdscreen = {
19401bb76ff1Sjsg 	"std",
19411bb76ff1Sjsg 	0, 0,
19421bb76ff1Sjsg 	0,
19431bb76ff1Sjsg 	0, 0,
19441bb76ff1Sjsg 	WSSCREEN_UNDERLINE | WSSCREEN_HILIT |
19451bb76ff1Sjsg 	WSSCREEN_REVERSE | WSSCREEN_WSCOLORS
19461bb76ff1Sjsg };
19471bb76ff1Sjsg 
19481bb76ff1Sjsg const struct wsscreen_descr *inteldrm_scrlist[] = {
19491bb76ff1Sjsg 	&inteldrm_stdscreen,
19501bb76ff1Sjsg };
19511bb76ff1Sjsg 
19521bb76ff1Sjsg struct wsscreen_list inteldrm_screenlist = {
19531bb76ff1Sjsg 	nitems(inteldrm_scrlist), inteldrm_scrlist
19541bb76ff1Sjsg };
19551bb76ff1Sjsg 
19561bb76ff1Sjsg struct wsdisplay_accessops inteldrm_accessops = {
19571bb76ff1Sjsg 	.ioctl = inteldrm_wsioctl,
19581bb76ff1Sjsg 	.mmap = inteldrm_wsmmap,
19591bb76ff1Sjsg 	.alloc_screen = inteldrm_alloc_screen,
19601bb76ff1Sjsg 	.free_screen = inteldrm_free_screen,
19611bb76ff1Sjsg 	.show_screen = inteldrm_show_screen,
19621bb76ff1Sjsg 	.enter_ddb = inteldrm_enter_ddb,
19631bb76ff1Sjsg 	.getchar = inteldrm_getchar,
19641bb76ff1Sjsg 	.load_font = inteldrm_load_font,
19651bb76ff1Sjsg 	.list_font = inteldrm_list_font,
19661bb76ff1Sjsg 	.scrollback = inteldrm_scrollback,
19671bb76ff1Sjsg 	.burn_screen = inteldrm_burner
19681bb76ff1Sjsg };
19691bb76ff1Sjsg 
19701bb76ff1Sjsg int
19711bb76ff1Sjsg inteldrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
19721bb76ff1Sjsg {
19731bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
19741bb76ff1Sjsg 	struct backlight_device *bd = dev_priv->backlight;
19751bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
19761bb76ff1Sjsg 	struct wsdisplay_fbinfo *wdf;
19771bb76ff1Sjsg 	struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
19781bb76ff1Sjsg 
19791bb76ff1Sjsg 	switch (cmd) {
19801bb76ff1Sjsg 	case WSDISPLAYIO_GTYPE:
19811bb76ff1Sjsg 		*(u_int *)data = WSDISPLAY_TYPE_INTELDRM;
19821bb76ff1Sjsg 		return 0;
19831bb76ff1Sjsg 	case WSDISPLAYIO_GINFO:
19841bb76ff1Sjsg 		wdf = (struct wsdisplay_fbinfo *)data;
19851bb76ff1Sjsg 		wdf->width = ri->ri_width;
19861bb76ff1Sjsg 		wdf->height = ri->ri_height;
19871bb76ff1Sjsg 		wdf->depth = ri->ri_depth;
19881bb76ff1Sjsg 		wdf->stride = ri->ri_stride;
19891bb76ff1Sjsg 		wdf->offset = 0;
19901bb76ff1Sjsg 		wdf->cmsize = 0;
19911bb76ff1Sjsg 		return 0;
19921bb76ff1Sjsg 	case WSDISPLAYIO_GETPARAM:
19931bb76ff1Sjsg 		if (ws_get_param && ws_get_param(dp) == 0)
19941bb76ff1Sjsg 			return 0;
19951bb76ff1Sjsg 
19961bb76ff1Sjsg 		if (bd == NULL)
19971bb76ff1Sjsg 			return -1;
19981bb76ff1Sjsg 
19991bb76ff1Sjsg 		switch (dp->param) {
20001bb76ff1Sjsg 		case WSDISPLAYIO_PARAM_BRIGHTNESS:
20011bb76ff1Sjsg 			dp->min = 0;
20021bb76ff1Sjsg 			dp->max = bd->props.max_brightness;
20031bb76ff1Sjsg 			dp->curval = bd->ops->get_brightness(bd);
20041bb76ff1Sjsg 			return (dp->max > dp->min) ? 0 : -1;
20051bb76ff1Sjsg 		}
20061bb76ff1Sjsg 		break;
20071bb76ff1Sjsg 	case WSDISPLAYIO_SETPARAM:
20081bb76ff1Sjsg 		if (ws_set_param && ws_set_param(dp) == 0)
20091bb76ff1Sjsg 			return 0;
20101bb76ff1Sjsg 
20111bb76ff1Sjsg 		if (bd == NULL || dp->curval > bd->props.max_brightness)
20121bb76ff1Sjsg 			return -1;
20131bb76ff1Sjsg 
20141bb76ff1Sjsg 		switch (dp->param) {
20151bb76ff1Sjsg 		case WSDISPLAYIO_PARAM_BRIGHTNESS:
20161bb76ff1Sjsg 			bd->props.brightness = dp->curval;
20171bb76ff1Sjsg 			backlight_update_status(bd);
2018c78098b6Svisa 			knote_locked(&dev_priv->drm.note, NOTE_CHANGE);
20191bb76ff1Sjsg 			return 0;
20201bb76ff1Sjsg 		}
20211bb76ff1Sjsg 		break;
2022804fcafaSjsg 	case WSDISPLAYIO_SVIDEO:
2023804fcafaSjsg 	case WSDISPLAYIO_GVIDEO:
2024804fcafaSjsg 		return 0;
20251bb76ff1Sjsg 	}
20261bb76ff1Sjsg 
20271bb76ff1Sjsg 	return (-1);
20281bb76ff1Sjsg }
20291bb76ff1Sjsg 
20301bb76ff1Sjsg paddr_t
20311bb76ff1Sjsg inteldrm_wsmmap(void *v, off_t off, int prot)
20321bb76ff1Sjsg {
20331bb76ff1Sjsg 	return (-1);
20341bb76ff1Sjsg }
20351bb76ff1Sjsg 
20361bb76ff1Sjsg int
20371bb76ff1Sjsg inteldrm_alloc_screen(void *v, const struct wsscreen_descr *type,
20381bb76ff1Sjsg     void **cookiep, int *curxp, int *curyp, uint32_t *attrp)
20391bb76ff1Sjsg {
20401bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
20411bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
20421bb76ff1Sjsg 
20431bb76ff1Sjsg 	return rasops_alloc_screen(ri, cookiep, curxp, curyp, attrp);
20441bb76ff1Sjsg }
20451bb76ff1Sjsg 
20461bb76ff1Sjsg void
20471bb76ff1Sjsg inteldrm_free_screen(void *v, void *cookie)
20481bb76ff1Sjsg {
20491bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
20501bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
20511bb76ff1Sjsg 
20521bb76ff1Sjsg 	return rasops_free_screen(ri, cookie);
20531bb76ff1Sjsg }
20541bb76ff1Sjsg 
20551bb76ff1Sjsg int
20561bb76ff1Sjsg inteldrm_show_screen(void *v, void *cookie, int waitok,
20571bb76ff1Sjsg     void (*cb)(void *, int, int), void *cbarg)
20581bb76ff1Sjsg {
20591bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
20601bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
20611bb76ff1Sjsg 
20621bb76ff1Sjsg 	if (cookie == ri->ri_active)
20631bb76ff1Sjsg 		return (0);
20641bb76ff1Sjsg 
20651bb76ff1Sjsg 	dev_priv->switchcb = cb;
20661bb76ff1Sjsg 	dev_priv->switchcbarg = cbarg;
20671bb76ff1Sjsg 	dev_priv->switchcookie = cookie;
20681bb76ff1Sjsg 	if (cb) {
20691bb76ff1Sjsg 		task_add(systq, &dev_priv->switchtask);
20701bb76ff1Sjsg 		return (EAGAIN);
20711bb76ff1Sjsg 	}
20721bb76ff1Sjsg 
20731bb76ff1Sjsg 	inteldrm_doswitch(v);
20741bb76ff1Sjsg 
20751bb76ff1Sjsg 	return (0);
20761bb76ff1Sjsg }
20771bb76ff1Sjsg 
20781bb76ff1Sjsg void
20791bb76ff1Sjsg inteldrm_doswitch(void *v)
20801bb76ff1Sjsg {
20811bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
20821bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
20831bb76ff1Sjsg 
20841bb76ff1Sjsg 	rasops_show_screen(ri, dev_priv->switchcookie, 0, NULL, NULL);
2085f005ef32Sjsg 	intel_fbdev_restore_mode(dev_priv);
20861bb76ff1Sjsg 
20871bb76ff1Sjsg 	if (dev_priv->switchcb)
20881bb76ff1Sjsg 		(*dev_priv->switchcb)(dev_priv->switchcbarg, 0, 0);
20891bb76ff1Sjsg }
20901bb76ff1Sjsg 
20911bb76ff1Sjsg void
20921bb76ff1Sjsg inteldrm_enter_ddb(void *v, void *cookie)
20931bb76ff1Sjsg {
20941bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
20951bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
20961bb76ff1Sjsg 
20971bb76ff1Sjsg 	if (cookie == ri->ri_active)
20981bb76ff1Sjsg 		return;
20991bb76ff1Sjsg 
21001bb76ff1Sjsg 	rasops_show_screen(ri, cookie, 0, NULL, NULL);
2101f005ef32Sjsg 	intel_fbdev_restore_mode(dev_priv);
21021bb76ff1Sjsg }
21031bb76ff1Sjsg 
21041bb76ff1Sjsg int
21051bb76ff1Sjsg inteldrm_getchar(void *v, int row, int col, struct wsdisplay_charcell *cell)
21061bb76ff1Sjsg {
21071bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
21081bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
21091bb76ff1Sjsg 
21101bb76ff1Sjsg 	return rasops_getchar(ri, row, col, cell);
21111bb76ff1Sjsg }
21121bb76ff1Sjsg 
21131bb76ff1Sjsg int
21141bb76ff1Sjsg inteldrm_load_font(void *v, void *cookie, struct wsdisplay_font *font)
21151bb76ff1Sjsg {
21161bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
21171bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
21181bb76ff1Sjsg 
21191bb76ff1Sjsg 	return rasops_load_font(ri, cookie, font);
21201bb76ff1Sjsg }
21211bb76ff1Sjsg 
21221bb76ff1Sjsg int
21231bb76ff1Sjsg inteldrm_list_font(void *v, struct wsdisplay_font *font)
21241bb76ff1Sjsg {
21251bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
21261bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
21271bb76ff1Sjsg 
21281bb76ff1Sjsg 	return rasops_list_font(ri, font);
21291bb76ff1Sjsg }
21301bb76ff1Sjsg 
21311bb76ff1Sjsg void
21321bb76ff1Sjsg inteldrm_burner(void *v, u_int on, u_int flags)
21331bb76ff1Sjsg {
21341bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
21351bb76ff1Sjsg 
21361bb76ff1Sjsg 	task_del(systq, &dev_priv->burner_task);
21371bb76ff1Sjsg 
21381bb76ff1Sjsg 	if (on)
21391bb76ff1Sjsg 		dev_priv->burner_fblank = FB_BLANK_UNBLANK;
21401bb76ff1Sjsg 	else {
21411bb76ff1Sjsg 		if (flags & WSDISPLAY_BURN_VBLANK)
21421bb76ff1Sjsg 			dev_priv->burner_fblank = FB_BLANK_VSYNC_SUSPEND;
21431bb76ff1Sjsg 		else
21441bb76ff1Sjsg 			dev_priv->burner_fblank = FB_BLANK_NORMAL;
21451bb76ff1Sjsg 	}
21461bb76ff1Sjsg 
21471bb76ff1Sjsg 	/*
21481bb76ff1Sjsg 	 * Setting the DPMS mode may sleep while waiting for the display
21491bb76ff1Sjsg 	 * to come back on so hand things off to a taskq.
21501bb76ff1Sjsg 	 */
21511bb76ff1Sjsg 	task_add(systq, &dev_priv->burner_task);
21521bb76ff1Sjsg }
21531bb76ff1Sjsg 
21541bb76ff1Sjsg void
21551bb76ff1Sjsg inteldrm_burner_cb(void *arg1)
21561bb76ff1Sjsg {
21571bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = arg1;
21581bb76ff1Sjsg 	struct drm_device *dev = &dev_priv->drm;
21591bb76ff1Sjsg 	struct drm_fb_helper *helper = dev->fb_helper;
21601bb76ff1Sjsg 
2161f005ef32Sjsg 	drm_fb_helper_blank(dev_priv->burner_fblank, helper->info);
21621bb76ff1Sjsg }
21631bb76ff1Sjsg 
21641bb76ff1Sjsg int
21651bb76ff1Sjsg inteldrm_backlight_update_status(struct backlight_device *bd)
21661bb76ff1Sjsg {
21671bb76ff1Sjsg 	struct wsdisplay_param dp;
21681bb76ff1Sjsg 
21691bb76ff1Sjsg 	dp.param = WSDISPLAYIO_PARAM_BRIGHTNESS;
21701bb76ff1Sjsg 	dp.curval = bd->props.brightness;
21711bb76ff1Sjsg 	ws_set_param(&dp);
21721bb76ff1Sjsg 	return 0;
21731bb76ff1Sjsg }
21741bb76ff1Sjsg 
21751bb76ff1Sjsg int
21761bb76ff1Sjsg inteldrm_backlight_get_brightness(struct backlight_device *bd)
21771bb76ff1Sjsg {
21781bb76ff1Sjsg 	struct wsdisplay_param dp;
21791bb76ff1Sjsg 
21801bb76ff1Sjsg 	dp.param = WSDISPLAYIO_PARAM_BRIGHTNESS;
21811bb76ff1Sjsg 	ws_get_param(&dp);
21821bb76ff1Sjsg 	return dp.curval;
21831bb76ff1Sjsg }
21841bb76ff1Sjsg 
21851bb76ff1Sjsg const struct backlight_ops inteldrm_backlight_ops = {
21861bb76ff1Sjsg 	.update_status = inteldrm_backlight_update_status,
21871bb76ff1Sjsg 	.get_brightness = inteldrm_backlight_get_brightness
21881bb76ff1Sjsg };
21891bb76ff1Sjsg 
21901bb76ff1Sjsg void
21911bb76ff1Sjsg inteldrm_scrollback(void *v, void *cookie, int lines)
21921bb76ff1Sjsg {
21931bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = v;
21941bb76ff1Sjsg 	struct rasops_info *ri = &dev_priv->ro;
21951bb76ff1Sjsg 
21961bb76ff1Sjsg 	rasops_scrollback(ri, cookie, lines);
21971bb76ff1Sjsg }
21981bb76ff1Sjsg 
/* autoconf(9) glue for the inteldrm device. */
int	inteldrm_match(struct device *, void *, void *);
void	inteldrm_attach(struct device *, struct device *, void *);
int	inteldrm_detach(struct device *, int);
int	inteldrm_activate(struct device *, int);
void	inteldrm_attachhook(struct device *);

const struct cfattach inteldrm_ca = {
	sizeof(struct inteldrm_softc), inteldrm_match, inteldrm_attach,
	inteldrm_detach, inteldrm_activate
};

struct cfdriver inteldrm_cd = {
	NULL, "inteldrm", DV_DULL
};

/* Interrupt dispatcher registered with pci_intr_establish(). */
int	inteldrm_intr(void *);

/*
 * Set if the mountroot hook has a fatal error.  Checked by
 * inteldrm_match() and inteldrm_activate() to refuse further work.
 */
int	inteldrm_fatal_error;
22201bb76ff1Sjsg 
22211bb76ff1Sjsg int
22221bb76ff1Sjsg inteldrm_match(struct device *parent, void *match, void *aux)
22231bb76ff1Sjsg {
22241bb76ff1Sjsg 	struct pci_attach_args *pa = aux;
22251bb76ff1Sjsg 	const struct pci_device_id *id;
22261bb76ff1Sjsg 	struct intel_device_info *info;
22271bb76ff1Sjsg 
22281bb76ff1Sjsg 	if (inteldrm_fatal_error)
22291bb76ff1Sjsg 		return 0;
22301bb76ff1Sjsg 
22311bb76ff1Sjsg 	id = drm_find_description(PCI_VENDOR(pa->pa_id),
22321bb76ff1Sjsg 	    PCI_PRODUCT(pa->pa_id), pciidlist);
22331bb76ff1Sjsg 	if (id != NULL) {
22341bb76ff1Sjsg 		info = (struct intel_device_info *)id->driver_data;
22351bb76ff1Sjsg 		if (info->require_force_probe == 0 &&
22361bb76ff1Sjsg 		    pa->pa_function == 0)
22371bb76ff1Sjsg 			return 20;
22381bb76ff1Sjsg 	}
22391bb76ff1Sjsg 
22401bb76ff1Sjsg 	return 0;
22411bb76ff1Sjsg }
22421bb76ff1Sjsg 
/* Helpers defined elsewhere in the drm code, used by inteldrm_attach(). */
int drm_gem_init(struct drm_device *);
void intel_init_stolen_res(struct inteldrm_softc *);
22451bb76ff1Sjsg 
/*
 * autoconf attach: record bus glue, decide whether we are the (console)
 * primary display, map the MMIO register BAR, establish the interrupt,
 * and defer the remaining (sleep-prone) driver bring-up to a mountroot
 * hook (inteldrm_attachhook()).
 */
void
inteldrm_attach(struct device *parent, struct device *self, void *aux)
{
	struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
	struct drm_device *dev;
	struct pci_attach_args *pa = aux;
	const struct pci_device_id *id;
	struct intel_device_info *info;
	extern int vga_console_attached;
	int mmio_bar, mmio_size, mmio_type;

	/* Stash bus-space/DMA handles for the Linux compatibility layer. */
	dev_priv->pa = pa;
	dev_priv->pc = pa->pa_pc;
	dev_priv->tag = pa->pa_tag;
	dev_priv->iot = pa->pa_iot;
	dev_priv->dmat = pa->pa_dmat;
	dev_priv->bst = pa->pa_memt;
	dev_priv->memex = pa->pa_memex;
	dev_priv->vga_regs = &dev_priv->bar;

	/* Match already succeeded, so the id lookup is expected to hit. */
	id = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), pciidlist);
	dev_priv->id = id;
	info = (struct intel_device_info *)id->driver_data;

	/*
	 * A VGA-class device with both I/O and memory decoding enabled is
	 * the active primary display; it may also be the glass console.
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
	    (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
	    == (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) {
		dev_priv->primary = 1;
		dev_priv->console = vga_is_console(pa->pa_iot, -1);
		vga_console_attached = 1;
	}

#if NEFIFB > 0
	/* Take over from the EFI framebuffer console if it owns this device. */
	if (efifb_is_primary(pa)) {
		dev_priv->primary = 1;
		dev_priv->console = efifb_is_console(pa);
		efifb_detach();
	}
#endif

	/*
	 * Meteor Lake GOP framebuffer doesn't pass efifb pci bar tests
	 * too early for IS_METEORLAKE which uses runtime info
	 */
	if (info->platform == INTEL_METEORLAKE) {
		dev_priv->primary = 1;
		dev_priv->console = 1;
#if NEFIFB > 0
		efifb_detach();
#endif
	}

	printf("\n");

	dev = drm_attach_pci(&i915_drm_driver, pa, 0, dev_priv->primary,
	    self, &dev_priv->drm);
	if (dev == NULL) {
		printf("%s: drm attach failed\n", dev_priv->sc_dev.dv_xname);
		return;
	}

	pci_set_drvdata(dev->pdev, dev_priv);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&dev_priv->params, &i915_modparams);
	dev_priv->params.request_timeout_ms = 0;
	dev_priv->params.enable_psr = 0;

	/* Set up device info and initial runtime info. */
	intel_device_info_driver_create(dev_priv, dev->pdev->device, info);

	/* uc_expand_default_options() with no GuC submission */
	if (GRAPHICS_VER(dev_priv) >= 12 &&
	    (INTEL_INFO(dev_priv)->platform != INTEL_TIGERLAKE) &&
	    (INTEL_INFO(dev_priv)->platform != INTEL_ROCKETLAKE) &&
	    (INTEL_INFO(dev_priv)->platform != INTEL_XEHPSDV) &&
	    (INTEL_INFO(dev_priv)->platform != INTEL_PONTEVECCHIO))
		dev_priv->params.enable_guc = ENABLE_GUC_LOAD_HUC;

	mmio_bar = (GRAPHICS_VER(dev_priv) == 2) ? 0x14 : 0x10;

	/* from intel_uncore_setup_mmio() */

	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 * For dgfx chips register range is expanded to 4MB, and this larger
	 * range is also used for integrated gpus beginning with Meteor Lake.
	 */
	if (IS_DGFX(dev_priv) || GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 70))
		mmio_size = 4 * 1024 * 1024;
	else if (GRAPHICS_VER(dev_priv) >= 5)
		mmio_size = 2 * 1024 * 1024;
	else
		mmio_size = 512 * 1024;

	mmio_type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, mmio_bar);
	if (pci_mapreg_map(pa, mmio_bar, mmio_type, BUS_SPACE_MAP_LINEAR,
	    &dev_priv->vga_regs->bst, &dev_priv->vga_regs->bsh,
	    &dev_priv->vga_regs->base, &dev_priv->vga_regs->size, mmio_size)) {
		printf("%s: can't map registers\n",
		    dev_priv->sc_dev.dv_xname);
		return;
	}
	dev_priv->uncore.regs = bus_space_vaddr(dev_priv->vga_regs->bst,
	     dev_priv->vga_regs->bsh);
	if (dev_priv->uncore.regs == NULL) {
		printf("%s: bus_space_vaddr registers failed\n",
		    dev_priv->sc_dev.dv_xname);
		return;
	}

#if NINTAGP > 0
	/* gen <= 5 uses the AGP GART; attach intagp(4) and set up MTRRs. */
	if (GRAPHICS_VER(dev_priv) <= 5) {
		config_found_sm(self, aux, intagp_print, intagpsubmatch);
		dev->agp = drm_legacy_agp_init(dev);
		if (dev->agp) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}
#endif

	/* NOTE(review): MSI is avoided on gen < 5 — presumably unreliable
	 * there; confirm against the Linux driver before changing. */
	if (GRAPHICS_VER(dev_priv) < 5)
		pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	if (pci_intr_map_msi(pa, &dev_priv->ih) != 0 &&
	    pci_intr_map(pa, &dev_priv->ih) != 0) {
		printf("%s: couldn't map interrupt\n",
		    dev_priv->sc_dev.dv_xname);
		return;
	}

	printf("%s: %s, %s, gen %d\n", dev_priv->sc_dev.dv_xname,
	    pci_intr_string(dev_priv->pc, dev_priv->ih),
	    intel_platform_name(INTEL_INFO(dev_priv)->platform),
	    GRAPHICS_VER(dev_priv));

	dev_priv->irqh = pci_intr_establish(dev_priv->pc, dev_priv->ih,
	    IPL_TTY, inteldrm_intr, dev_priv, dev_priv->sc_dev.dv_xname);
	if (dev_priv->irqh == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    dev_priv->sc_dev.dv_xname);
		return;
	}
	/* Interrupts are owned by inteldrm_intr(); no Linux-style irq. */
	dev->pdev->irq = -1;
	intel_gmch_bridge_setup(dev_priv);
	intel_init_stolen_res(dev_priv);

	/* Finish bring-up after filesystems are up (firmware loading etc.). */
	config_mountroot(self, inteldrm_attachhook);
}
24051bb76ff1Sjsg 
/*
 * Undo the console takeover after a fatal error in the mountroot hook
 * so another framebuffer driver (e.g. efifb) can reclaim the display.
 * A real config_detach()/re-probe of the PCI function is not done yet.
 */
void
inteldrm_forcedetach(struct inteldrm_softc *dev_priv)
{
#ifdef notyet
	struct pci_softc *psc = (struct pci_softc *)dev_priv->sc_dev.dv_parent;
	pcitag_t tag = dev_priv->tag;
#endif
	extern int vga_console_attached;

	if (dev_priv->primary) {
		vga_console_attached = 0;
#if NEFIFB > 0
		efifb_reattach();
#endif
	}

#ifdef notyet
	config_detach(&dev_priv->sc_dev, 0);
	pci_probe_device(psc, tag, NULL, NULL);
#endif
}
24271bb76ff1Sjsg 
/* Linux-style module init; called once from inteldrm_attachhook(). */
extern int __init i915_init(void);
24291bb76ff1Sjsg 
/*
 * mountroot hook: run the Linux i915 probe, then wire the resulting
 * framebuffer up as a rasops/wsdisplay console.  On any failure, mark
 * the driver fatally broken and back out of the console takeover.
 */
void
inteldrm_attachhook(struct device *self)
{
	struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
	struct rasops_info *ri = &dev_priv->ro;
	struct wsemuldisplaydev_attach_args aa;
	const struct pci_device_id *id = dev_priv->id;
	int orientation_quirk;

	/* One-time Linux-side module init, shared across all instances. */
	if (inteldrm_refcnt == 0) {
		i915_init();
	}
	inteldrm_refcnt++;

	if (i915_driver_probe(dev_priv, id))
		goto fail;

	/* No framebuffer memory means no usable console. */
	if (ri->ri_bits == NULL)
		goto fail;

	printf("%s: %dx%d, %dbpp\n", dev_priv->sc_dev.dv_xname,
	    ri->ri_width, ri->ri_height, ri->ri_depth);

	ri->ri_flg = RI_CENTER | RI_WRONLY | RI_VCONS | RI_CLEAR;

	/* Rotate the console on panels mounted sideways. */
	orientation_quirk = drm_get_panel_orientation_quirk(ri->ri_width,
	    ri->ri_height);
	if (orientation_quirk == DRM_MODE_PANEL_ORIENTATION_LEFT_UP)
		ri->ri_flg |= RI_ROTATE_CCW;
	else if (orientation_quirk == DRM_MODE_PANEL_ORIENTATION_RIGHT_UP)
		ri->ri_flg |= RI_ROTATE_CW;

	ri->ri_hw = dev_priv;
	rasops_init(ri, 160, 160);

	task_set(&dev_priv->switchtask, inteldrm_doswitch, dev_priv);
	task_set(&dev_priv->burner_task, inteldrm_burner_cb, dev_priv);

	/* Publish the rasops capabilities through the wsscreen descriptor. */
	inteldrm_stdscreen.capabilities = ri->ri_caps;
	inteldrm_stdscreen.nrows = ri->ri_rows;
	inteldrm_stdscreen.ncols = ri->ri_cols;
	inteldrm_stdscreen.textops = &ri->ri_ops;
	inteldrm_stdscreen.fontwidth = ri->ri_font->fontwidth;
	inteldrm_stdscreen.fontheight = ri->ri_font->fontheight;

	aa.console = dev_priv->console;
	aa.primary = dev_priv->primary;
	aa.scrdata = &inteldrm_screenlist;
	aa.accessops = &inteldrm_accessops;
	aa.accesscookie = dev_priv;
	aa.defaultscreens = 0;

	if (dev_priv->console) {
		uint32_t defattr;

		/*
		 * Clear the entire screen if we're doing rotation to
		 * make sure no unrotated content survives.
		 */
		if (ri->ri_flg & (RI_ROTATE_CW | RI_ROTATE_CCW))
			memset(ri->ri_bits, 0, ri->ri_height * ri->ri_stride);

		ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr);
		wsdisplay_cnattach(&inteldrm_stdscreen, ri->ri_active,
		    0, 0, defattr);
	}

	/* Attach wsdisplay(4) on top of us. */
	config_found_sm(self, &aa, wsemuldisplaydevprint,
	    wsemuldisplaydevsubmatch);
	return;

fail:
	inteldrm_fatal_error = 1;
	inteldrm_forcedetach(dev_priv);
}
25051bb76ff1Sjsg 
/*
 * autoconf detach: nothing to tear down here yet; always succeeds.
 */
int
inteldrm_detach(struct device *self, int flags)
{
	return (0);
}
25111bb76ff1Sjsg 
/*
 * autoconf activate: drive the drm device through suspend/resume.
 * Children (and the AGP chipset, where present) are suspended/resumed
 * around the i915 suspend/resume calls.
 */
int
inteldrm_activate(struct device *self, int act)
{
	struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
	struct drm_device *dev = &dev_priv->drm;
	int rv = 0;

	/* Nothing to do if attach failed or never completed. */
	if (dev->dev == NULL || inteldrm_fatal_error)
		return (0);

	/*
	 * On hibernate resume activate is called before inteldrm_attachhook().
	 * Do not try to call i915_drm_suspend() when
	 * i915_load_modeset_init()/i915_gem_init() have not been called.
	 */
	if (dev_priv->display.wq.modeset == NULL)
		return 0;

	switch (act) {
	case DVACT_QUIESCE:
		/* Suspend children first, then the i915 device itself. */
		rv = config_suspend(dev->dev, act);
		i915_drm_prepare(dev);
		i915_drm_suspend(dev);
		i915_drm_suspend_late(dev, false);
		break;
	case DVACT_SUSPEND:
		if (dev->agp)
			config_suspend(dev->agp->agpdev->sc_chipc, act);
		break;
	case DVACT_RESUME:
		if (dev->agp)
			config_suspend(dev->agp->agpdev->sc_chipc, act);
		break;
	case DVACT_WAKEUP:
		/* Resume i915 and restore the console mode, then children. */
		i915_drm_resume_early(dev);
		i915_drm_resume(dev);
		intel_fbdev_restore_mode(dev_priv);
		rv = config_suspend(dev->dev, act);
		break;
	}

	return (rv);
}
25551bb76ff1Sjsg 
/*
 * Expose the panels' native backlight devices as "Backlight" connector
 * properties, and remember the first one found for wscons(4) use.
 */
void
inteldrm_native_backlight(struct inteldrm_softc *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_connector *intel_connector;
		struct intel_panel *panel;
		struct backlight_device *bd;

		if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
			continue;

		intel_connector = to_intel_connector(connector);
		panel = &intel_connector->panel;
		bd = panel->backlight.device;

		/* Skip connectors without a usable native backlight. */
		if (!panel->backlight.present || bd == NULL)
			continue;

		/*
		 * NOTE(review): registration state is toggled off around
		 * the property creation/attach — presumably to satisfy
		 * checks in the drm property code; confirm before altering.
		 */
		dev->registered = false;
		connector->registration_state = DRM_CONNECTOR_UNREGISTERED;

		connector->backlight_device = bd;
		connector->backlight_property = drm_property_create_range(dev,
		    0, "Backlight", 0, bd->props.max_brightness);
		drm_object_attach_property(&connector->base,
		    connector->backlight_property, bd->props.brightness);

		connector->registration_state = DRM_CONNECTOR_REGISTERED;
		dev->registered = true;

		/*
		 * Use backlight from the first connector that has one
		 * for wscons(4).
		 */
		if (dev_priv->backlight == NULL)
			dev_priv->backlight = bd;
	}
	drm_connector_list_iter_end(&conn_iter);
}
26001bb76ff1Sjsg 
/*
 * Register a single firmware-backed backlight device (driven through
 * wscons ws_set_param/ws_get_param) and attach it as the "Backlight"
 * property on every internal-panel connector (LVDS/eDP/DSI).
 */
void
inteldrm_firmware_backlight(struct inteldrm_softc *dev_priv,
    struct wsdisplay_param *dp)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	struct backlight_properties props;
	struct backlight_device *bd;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_FIRMWARE;
	props.brightness = dp->curval;
	/*
	 * NOTE(review): bd is not checked for NULL before use below —
	 * presumably backlight_device_register() cannot fail in this
	 * port; confirm against the local drm backlight shim.
	 */
	bd = backlight_device_register(dev->dev->dv_xname, NULL, NULL,
	    &inteldrm_backlight_ops, &props);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only internal panels get the firmware backlight. */
		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
		    connector->connector_type != DRM_MODE_CONNECTOR_DSI)
			continue;

		if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
			continue;

		/* Toggle registration off around property creation/attach. */
		dev->registered = false;
		connector->registration_state = DRM_CONNECTOR_UNREGISTERED;

		connector->backlight_device = bd;
		connector->backlight_property = drm_property_create_range(dev,
		    0, "Backlight", dp->min, dp->max);
		drm_object_attach_property(&connector->base,
		    connector->backlight_property, dp->curval);

		connector->registration_state = DRM_CONNECTOR_REGISTERED;
		dev->registered = true;
	}
	drm_connector_list_iter_end(&conn_iter);
}
26411bb76ff1Sjsg 
26421bb76ff1Sjsg void
26431bb76ff1Sjsg inteldrm_init_backlight(struct inteldrm_softc *dev_priv)
26441bb76ff1Sjsg {
26451bb76ff1Sjsg 	struct wsdisplay_param dp;
26461bb76ff1Sjsg 
26471bb76ff1Sjsg 	dp.param = WSDISPLAYIO_PARAM_BRIGHTNESS;
26481bb76ff1Sjsg 	if (ws_get_param && ws_get_param(&dp) == 0)
26491bb76ff1Sjsg 		inteldrm_firmware_backlight(dev_priv, &dp);
26501bb76ff1Sjsg 	else
26511bb76ff1Sjsg 		inteldrm_native_backlight(dev_priv);
26521bb76ff1Sjsg }
26531bb76ff1Sjsg 
26541bb76ff1Sjsg int
26551bb76ff1Sjsg inteldrm_intr(void *arg)
26561bb76ff1Sjsg {
26571bb76ff1Sjsg 	struct inteldrm_softc *dev_priv = arg;
26581bb76ff1Sjsg 
26591bb76ff1Sjsg 	if (dev_priv->irq_handler)
26601bb76ff1Sjsg 		return dev_priv->irq_handler(0, dev_priv);
26611bb76ff1Sjsg 
26621bb76ff1Sjsg 	return 0;
26631bb76ff1Sjsg }
26641bb76ff1Sjsg 
26651bb76ff1Sjsg #endif
2666