/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_pciids.h>
#include "intel_drv.h"

/* Specify LVDS channel mode
 * (0=probe BIOS [default], 1=single-channel, 2=dual-channel) */
int i915_lvds_channel_mode __read_mostly = 0;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);

int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
		 "Disable the power well when possible (default: true)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");

int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");

static struct drm_driver driver;

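/*
 * Match any Intel display-class device with the given PCI id and stash a
 * pointer to its intel_device_info in driver_data for the probe code to
 * look up later.
 */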
#define INTEL_VGA_DEVICE(id, info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = id,				\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }

#define INTEL_QUANTA_VGA_DEVICE(info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = 0x16a,			\
	.subvendor = 0x152d,			\
	.subdevice = 0x8990,			\
	.driver_data = (unsigned long) info }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

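/*
 * Common feature baseline for all gen7 parts.  Because these are C99
 * designated initializers, an entry below may override any field set by
 * this macro; the last initializer for a given field wins.
 */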
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_bsd_ring = 1, \
	.has_blt_ring = 1, \
	.has_llc = 1, \
	.has_force_wake = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	.has_fbc = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_vebox_ring = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	.has_vebox_ring = 1,
};

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT3 mobile */
	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
	{0, 0}
};

#define	PCI_VENDOR_INTEL	0x8086

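/*
 * Identify which PCH (south bridge / "south display") is paired with the
 * CPU so the rest of the driver can key clocking and hotplug code off
 * dev_priv->pch_type.
 */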
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	device_t pch;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easier for VMMs, which then only
	 * need to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 */
	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
	if (pch) {
		if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
			unsigned short id;
			id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			}
		}
#if 0
		pci_dev_put(pch);
#endif
	}
}

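/*
 * Inter-ring semaphores require gen6+ hardware.  The drm.i915.semaphores
 * tunable overrides the default below: -1 keeps the per-chip default,
 * 0/1 force semaphores off/on.
 */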
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915_semaphores >= 0)
		return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

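/*
 * Suspend path: quiesce GEM, tear down interrupts and CRTCs, then save
 * register state.  i915_drm_thaw() below is the matching resume path.
 */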
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_set_power_well(dev, true);

	drm_kms_helper_poll_disable(dev);

#if 0
	pci_save_state(dev->pdev);
#endif

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			dev_err(dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);

		intel_modeset_suspend_hw(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

#if 0
	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();
#endif

	return 0;
}

static int
i915_suspend(device_t kdev)
{
	struct drm_device *dev;
	int error;

	dev = device_get_softc(kdev);
	if (dev == NULL || dev->dev_private == NULL) {
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	DRM_DEBUG_KMS("starting suspend\n");
	error = i915_drm_freeze(dev);
	if (error)
		return (error);

	error = bus_generic_suspend(kdev);
	DRM_DEBUG_KMS("finished suspend %d\n", error);
	return (error);
}

#if 0
void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}
#endif

static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
#if 0
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}
#endif

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	return error;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	int error = 0;

	intel_gt_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Propagate the thaw result instead of discarding it */
	error = __i915_drm_thaw(dev);

	return error;
}

static int
i915_resume(device_t kdev)
{
	struct drm_device *dev = device_get_softc(kdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

#if 0
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);
#endif

	intel_gt_sanitize(dev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    !dev_priv->opregion.header) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	ret = -i915_drm_thaw(dev);
	if (ret)
		return ret;

	ret = bus_generic_resume(kdev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

/* XXX Hack for the old *BSD drm code base
 * The device id field is set at probe time */
static drm_pci_id_list_t i915_attach_list[] = {
	{0x8086, 0, 0, "Intel i915 GPU"},
	{0, 0, 0, NULL}
};

int i915_modeset;

/* static int __init i915_init(void) */
static int
i915_attach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);

	driver.num_ioctls = i915_max_ioctl;

	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

	dev->driver = &driver;
	return (drm_attach(kdev, i915_attach_list));
}

const struct intel_device_info *
i915_get_device_id(int device)
{
	const struct pci_device_id *did;

	for (did = &pciidlist[0]; did->device != 0; did++) {
		if (did->device != device)
			continue;
		return (struct intel_device_info *)did->driver_data;
	}
	return (NULL);
}

extern devclass_t drm_devclass;

int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);

int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);

static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_RENDER |
			      GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_MEDIA |
			      GRDOM_RESET_ENABLE);

	return wait_for(i965_reset_complete(dev), 500);
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->gt.force_wake_get(dev_priv);
	else
		dev_priv->gt.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	lockmgr(&dev_priv->gt_lock, LK_RELEASE);
	return ret;
}

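/*
 * Dispatch to the per-generation reset implementation above.  Gen3 has no
 * implementation here, so it falls through to -ENODEV like unknown gens.
 */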
int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	case 2: return i8xx_do_reset(dev);
	default: return -ENODEV;
	}
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset, otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
		ret = -ENODEV;
	} else {
		ret = intel_gpu_reset(dev);

		/* Also reset the gpu hangman. */
		if (simulated) {
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
			dev_priv->gpu_error.stop_rings = 0;
			if (ret == -ENODEV) {
				DRM_ERROR("Reset not implemented, but ignoring "
					  "error for simulated gpu hangs\n");
				ret = 0;
			}
		} else
			dev_priv->gpu_error.last_reset = get_seconds();
	}
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		if (dev_priv->mm.aliasing_ppgtt) {
			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
			if (ret)
				i915_gem_cleanup_aliasing_ppgtt(dev);
		}

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		mutex_unlock(&dev->struct_mutex);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

static int
i915_pci_probe(device_t kdev)
{
	int device, i = 0;

	if (pci_get_class(kdev) != PCIC_DISPLAY)
		return ENXIO;

	if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
		return ENXIO;

	device = pci_get_device(kdev);

	for (i = 0; pciidlist[i].device != 0; i++) {
		if (pciidlist[i].device == device) {
			i915_attach_list[0].device = device;
			return 0;
		}
	}

	return ENXIO;
}

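/*
 * GEM mmap support: on DragonFly object faults are routed through the
 * cdev pager rather than Linux-style vm_operations.
 */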
static struct cdev_pager_ops i915_gem_vm_ops = {
	.cdev_pg_fault	= i915_gem_fault,
	.cdev_pg_ctor	= i915_gem_pager_ctor,
	.cdev_pg_dtor	= i915_gem_pager_dtor
};

static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
	    DRIVER_GEM /*| DRIVER_MODESET*/,

	.buf_priv_size	= sizeof(drm_i915_private_t),
	.load		= i915_driver_load,
	.open		= i915_driver_open,
	.unload		= i915_driver_unload,
	.preclose	= i915_driver_preclose,
	.lastclose	= i915_driver_lastclose,
	.postclose	= i915_driver_postclose,
	.device_is_agp	= i915_driver_device_is_agp,
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops	= &i915_gem_vm_ops,
	.dumb_create	= i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy	= i915_gem_dumb_destroy,

	.ioctls		= i915_ioctls,

	.name		= DRIVER_NAME,
	.desc		= DRIVER_DESC,
	.date		= DRIVER_DATE,
	.major		= DRIVER_MAJOR,
	.minor		= DRIVER_MINOR,
	.patchlevel	= DRIVER_PATCHLEVEL,
};

static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_pci_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_release),
	DEVMETHOD_END
};

static driver_t i915_driver = {
	"drm",
	i915_methods,
	sizeof(struct drm_device)
};

DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drm, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

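/*
 * MMIO read accessors.  Registers that need forcewake (see
 * NEEDS_FORCE_WAKE above) are read with the GT temporarily forced awake,
 * unless a userspace forcewake reference is already held.
 */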
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	lockmgr(&dev_priv->gt_lock, LK_RELEASE); \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read

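/*
 * MMIO write accessors.  Forcewake-protected registers first wait for
 * free FIFO entries; gen5 gets a dummy wakeup write, and Haswell's
 * unclaimed-register detection brackets the actual write.
 */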
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	lockmgr(&dev_priv->gt_lock, LK_RELEASE); \
}

__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write

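/*
 * Registers userspace may read via the reg-read ioctl below.  For
 * example, the 0xF0 mask in the single entry covers gens 4 through 7
 * (bit N set => gen N allowed).
 */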
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};

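/*
 * Userspace usage sketch (assuming libdrm's drmIoctl() and the
 * DRM_IOCTL_I915_REG_READ wrapper from i915_drm.h; the offset value
 * shown reuses the kernel's RING_TIMESTAMP(RENDER_RING_BASE) constant
 * purely for illustration):
 *
 *	struct drm_i915_reg_read rr = { .offset = RING_TIMESTAMP(RENDER_RING_BASE) };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("render ring timestamp: %llx\n", (unsigned long long)rr.val);
 */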
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
1190