/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/console.h>
#include "drm_crtc_helper.h"

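/*
 * Module parameters.  Those registered with mode 0600 can be changed at
 * runtime through /sys/module/i915/parameters/, while the 0400 ones are
 * fixed once the module is loaded.  A typical invocation might look like
 * "modprobe i915 modeset=1 powersave=0" (illustrative only).
 */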
static int i915_modeset = -1;
module_param_named(modeset, i915_modeset, int, 0400);

unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

int i915_panel_ignore_lid = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);

unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);

unsigned int i915_semaphores = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);

unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);

unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);

unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);

int i915_vbt_sdvo_panel_type = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);

static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);

static struct drm_driver driver;
extern int intel_agp_enabled;

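/*
 * INTEL_VGA_DEVICE() builds a pci_device_id entry matching an Intel
 * (vendor 0x8086) display-class device with the given device ID and stores
 * a pointer to the corresponding intel_device_info in driver_data.  The
 * intel_device_info tables below describe the generation and feature set
 * of each supported GPU and are looked up through that pointer at probe
 * time.
 */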
#define INTEL_VGA_DEVICE(id, info) {		\
	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = id,				\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
};

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00

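/*
 * Figure out which PCH (Platform Controller Hub) the GPU is paired with.
 * On PCH-split platforms such as Ironlake and Sandy Bridge the display
 * outputs live in the PCH, so later code needs to know its type; only
 * CougarPoint is distinguished here.
 */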
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which
	 * then only needs to expose the ISA bridge to let the driver know
	 * the real hardware underneath. This is a requirement from the
	 * virtualization team.
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (pch) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			int id;
			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

			if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
			}
		}
		pci_dev_put(pch);
	}
}

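/*
 * On Gen6 (Sandy Bridge) the GT core can drop into a low-power state in
 * which its registers are not accessible.  Before touching GT registers,
 * wait for any previous forcewake request to drain, assert FORCEWAKE and
 * wait for the hardware to acknowledge it; release it again when done.
 */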
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		udelay(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		udelay(10);
}

void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(FORCEWAKE);
}

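/*
 * GT register writes on Gen6 go through a FIFO with a limited number of
 * free entries.  Wait until a reasonable number of slots (20 here) are
 * available before issuing further writes so the FIFO does not overflow.
 */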
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int loop = 500;
	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
	while (fifo < 20 && loop--) {
		udelay(10);
		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
	}
}

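/*
 * Common suspend path shared by the legacy DRM .suspend hook and the
 * dev_pm_ops callbacks below: quiesce GEM and the IRQ handler when KMS is
 * active, then save PCI config space and register state and shut down the
 * ACPI OpRegion.
 */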
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

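/*
 * Counterpart to i915_drm_freeze(): restore GTT mappings and register
 * state, bring the ring buffer and interrupts back up under KMS, and force
 * a modeset on every active CRTC.
 */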
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);

		drm_mode_config_reset(dev);
		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	return error;
}

int i915_resume(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	ret = i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

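/*
 * Per-generation GPU reset back-ends.  i915_reset() below selects one of
 * these based on INTEL_INFO(dev)->gen after a hang has been detected.
 */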
static int i8xx_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return gdrst & 0x1;
}

static int i965_do_reset(struct drm_device *dev, u8 flags)
{
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);

	return wait_for(i965_reset_complete(dev), 500);
}

static int ironlake_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
	return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return 0;

	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 6:
		ret = gen6_do_reset(dev, flags);
		break;
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;
	case 4:
		ret = i965_do_reset(dev, flags);
		break;
	case 2:
		ret = i8xx_do_reset(dev, flags);
		break;
	}
	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
		if (HAS_BSD(dev))
		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
		if (HAS_BLT(dev))
		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
	 * need to retrain the display link and cannot just restore the register
	 * values.
	 */
	if (need_display) {
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}

	return 0;
}

static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

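/*
 * PCI power-management callbacks.  These route the system sleep
 * transitions (suspend and hibernate) through the freeze/thaw helpers
 * above; the legacy .suspend/.resume entries in the drm_driver below cover
 * the non-DRIVER_MODESET (UMS) case, as noted there.
 */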
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,
};

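/*
 * vm_operations used for mmap()ed GEM objects: page faults are serviced by
 * i915_gem_fault(), which binds the object into the GTT on demand and maps
 * the corresponding aperture pages into the user's address space.
 */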
static struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver driver = {
	/* don't use mtrr's here, the Xserver or user space app should
	 * deal with them for intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.enable_vblank = i915_enable_vblank,
	.disable_vblank = i915_disable_vblank,
	.get_vblank_timestamp = i915_get_vblank_timestamp,
	.get_scanout_position = i915_get_crtc_scanoutpos,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,
	.irq_handler = i915_driver_irq_handler,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = i915_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = drm_ioctl,
		 .mmap = drm_gem_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
		 .read = drm_read,
#ifdef CONFIG_COMPAT
		 .compat_ioctl = i915_compat_ioctl,
#endif
		 .llseek = noop_llseek,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

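/*
 * Module init: i915 depends on the intel_agp/intel-gtt code for GTT setup,
 * so refuse to load if that module did not initialize, then decide whether
 * to enable KMS and register the PCI driver.
 */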
static int __init i915_init(void)
{
	if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
		return -ENODEV;
	}

	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915_modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET))
		driver.get_vblank_timestamp = NULL;

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");