/* i915_drv.c, revision 1.4 (NetBSD drm2 port of the Linux i915 driver) */
      1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
      2  */
      3 /*
      4  *
      5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  */
     29 
     30 #include <linux/device.h>
     31 #include <linux/moduleparam.h>
     32 #include <linux/time.h>
     33 #include <drm/drmP.h>
     34 #include <drm/i915_drm.h>
     35 #include "i915_drv.h"
     36 #include "i915_trace.h"
     37 #include "intel_drv.h"
     38 
     39 #include <linux/console.h>
     40 #include <linux/module.h>
     41 #include <drm/drm_crtc_helper.h>
     42 
/* DRM driver descriptor; its initializer lives later in this file. */
static struct drm_driver driver;

#ifdef __NetBSD__
/* XXX Kludge to expose this to NetBSD driver attachment goop.  */
struct drm_driver *const i915_drm_driver = &driver;
#endif
     49 
/*
 * Default per-pipe register-block offsets shared by the device-info
 * tables below: pipes A-C plus eDP, their transcoders, the two DPLLs
 * (plus MD variants), and the two palettes.
 */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
     59 
/*
 * Per-platform capability tables for gen2 through gen6.  Each table is
 * referenced from the PCI id list below (via INTEL_*_IDS) so the probe
 * path can look up pipe count, ring mask, overlay/FBC support, etc.
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
    201 
/*
 * Feature bits shared by every gen7 part (Ivybridge, Valleyview,
 * Haswell).  Individual tables may override members afterwards; with
 * designated initializers the last initializer for a member wins.
 */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
    288 
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),		\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_D_IDS(&intel_broadwell_d_info)

/* Zero-filled sentinel entry terminates the table. */
static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0, 0, 0, 0, 0}
};
    328 
/* On Linux KMS builds, export the id table for module autoloading. */
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

#ifdef __NetBSD__
/* XXX Kludge to expose this to NetBSD driver attachment goop.  */
const struct pci_device_id *const i915_device_ids = pciidlist;
const size_t i915_n_device_ids = __arraycount(pciidlist);
#endif
    338 
/*
 * intel_detect_pch - identify the Platform Controller Hub (south bridge)
 * @dev: drm device being probed
 *
 * Scans the Intel ISA-bridge PCI devices to classify the PCH and records
 * the result in dev_priv->pch_type and dev_priv->pch_id.  Logs (but does
 * not fail) when no PCH is found.
 */
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			/* Mask off the revision bits before classifying. */
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				/* Broadwell is matched by platform, not by the
				 * probed id; pch_id is overridden here. */
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;	/* unknown Intel bridge; keep scanning */

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	/* Drop the reference taken by pci_get_class (NULL is a no-op). */
	pci_dev_put(pch);
}
    408 
    409 bool i915_semaphore_is_enabled(struct drm_device *dev)
    410 {
    411 	if (INTEL_INFO(dev)->gen < 6)
    412 		return false;
    413 
    414 	if (i915.semaphores >= 0)
    415 		return i915.semaphores;
    416 
    417 	/* Until we get further testing... */
    418 	if (IS_GEN8(dev))
    419 		return false;
    420 
    421 #ifdef CONFIG_INTEL_IOMMU
    422 	/* Enable semaphores on SNB when IO remapping is off */
    423 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
    424 		return false;
    425 #endif
    426 
    427 	return true;
    428 }
    429 
/*
 * i915_drm_freeze - common suspend-side work shared by suspend and freeze
 * @dev: drm device
 *
 * Quiesces GEM, disables CRTCs (keeping their sw state for the thaw
 * path), saves register state and tears down opregion/uncore.  Returns 0
 * on success or the error from i915_gem_suspend().
 *
 * NOTE(review): the error return leaves the runtime-pm reference taken
 * below held; the matching put happens in __i915_drm_thaw() — confirm
 * intended on the failure path.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* Released by intel_runtime_pm_put() at the end of __i915_drm_thaw(). */
	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

#ifndef __NetBSD__		/* pmf handles this for us.  */
	pci_save_state(dev->pdev);
#endif

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
#ifdef __NetBSD__
			dev_err(pci_dev_dev(dev->pdev),
			    "GEM idle failed, resume might fail\n");
#else
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
#endif
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		/* Interrupts off before touching the CRTCs. */
		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);
	intel_uncore_fini(dev);

#ifndef __NetBSD__		/* XXX fb */
	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();
#endif

	dev_priv->suspend_count++;

	return 0;
}
    501 
/*
 * i915_suspend - legacy suspend entry point
 * @dev: drm device (may be NULL if the driver never initialized)
 * @state: PM event describing the transition
 *
 * Runs the common freeze path and, on Linux for a real suspend event,
 * powers the PCI device down to D3hot.  Returns 0 on success, -ENODEV
 * when DRM is not initialized, or the error from i915_drm_freeze().
 */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Nothing to save on the pre-thaw pass. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	/* Already powered off (presumably by vga switcheroo) — nothing to do. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

#ifndef __NetBSD__		/* pmf handles this for us.  */
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
#endif

	return 0;
}
    533 
/*
 * intel_console_resume - deferred fbdev-console resume worker
 * @work: the console_resume_work embedded in drm_i915_private
 *
 * Scheduled from __i915_drm_thaw() when the console lock was contended;
 * marks the fbdev console as running under the console lock.  Empty on
 * NetBSD, which has no fbdev handling here.
 */
void intel_console_resume(struct work_struct *work)
{
#ifndef __NetBSD__		/* XXX fb */
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
#endif
}
    547 
/*
 * intel_resume_hotplug - re-run hotplug detection after resume
 * @dev: drm device
 *
 * Calls every encoder's ->hot_plug() hook under the mode_config mutex,
 * then emits a hotplug uevent so userspace can react to any display
 * configuration change that happened while suspended.
 */
static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
    565 
/*
 * i915_drm_thaw_early - first-stage thaw/resume
 * @dev: drm device
 *
 * Sanitizes register-access (uncore) state and re-initializes the power
 * domain hardware before the main thaw runs.  Always returns 0.
 */
static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_early_sanitize(dev);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
    576 
/*
 * __i915_drm_thaw - main thaw/resume work
 * @dev: drm device
 * @restore_gtt_mappings: rewrite GTT PTEs (needed when the BIOS may have
 *	cleared them, i.e. on platforms without an opregion)
 *
 * Restores saved register state, re-initializes GEM/modeset hardware and
 * interrupts, re-enables hotplug, and releases the runtime-pm reference
 * taken in i915_drm_freeze().  Returns the error from i915_gem_init_hw()
 * (0 otherwise).
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);

		/* NOTE(review): error is carried through even though modeset
		 * bring-up below still runs — confirm intended. */
		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

#ifndef __NetBSD__		/* XXX fb */
	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}
#endif

	/* Lid events are interesting again; see i915_drm_freeze(). */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* Matches the intel_runtime_pm_get() in i915_drm_freeze(). */
	intel_runtime_pm_put(dev_priv);
	return error;
}
    646 
#ifndef __NetBSD__		/* XXX freeze/thaw */
/*
 * i915_drm_thaw - hibernation thaw callback body
 * @dev: drm device
 *
 * Clears any GTT faults left over from hibernation, then performs the
 * full thaw including GTT mapping restoration.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}
#endif
    656 
/*
 * i915_resume_early - first-stage resume
 * @dev: drm device
 *
 * Re-enables the PCI device (Linux only), restores bus mastering, and
 * runs the early thaw.  Returns 0, -EIO if the PCI device could not be
 * enabled, or the result of i915_drm_thaw_early().
 */
static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

#ifndef __NetBSD__		/* pmf handles this for us.  */
	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;
#endif

	/* XXX pmf probably handles this for us too.  */
	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}
    681 
/*
 * i915_resume - second-stage resume
 * @dev: drm device
 *
 * Performs the main thaw (restoring GTT mappings only on platforms
 * without an opregion, whose BIOS may have clobbered the PTEs) and
 * re-enables output polling.  Returns 0 or the thaw error.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
    699 
/*
 * i915_resume_legacy - full resume for the legacy (non-dev_pm_ops) path
 * @dev: drm device
 *
 * Runs the early and main resume stages back to back.  Previously both
 * return values were discarded and 0 was returned unconditionally,
 * silently hiding resume failures; propagate the first error instead.
 */
static int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	ret = i915_resume_early(dev);
	if (ret)
		return ret;

	return i915_resume(dev);
}
    707 
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	/* GPU reset disabled via module parameter. */
	if (!i915.reset)
		return 0;

	/* Held across the reset; released on every exit path below. */
	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Non-zero stop_rings means this hang was injected for testing. */
	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		drm_irq_uninstall(dev);
		drm_irq_install(dev);

		/* rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of drm irq. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset. */
		if (INTEL_INFO(dev)->gen > 5) {
			mutex_lock(&dev->struct_mutex);
			intel_enable_gt_powersave(dev);
			mutex_unlock(&dev->struct_mutex);
		}

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
    802 
    803 #ifdef __NetBSD__
    804 
/* NetBSD UVM pager ops for GEM objects: generic DRM reference/detach,
 * i915-specific page-fault handler. */
static const struct uvm_pagerops i915_gem_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = i915_gem_fault,
};
    810 
    811 #else
    812 
/*
 * i915_pci_probe - Linux PCI bind callback
 * @pdev: PCI device being bound
 * @ent: matching pciidlist entry; driver_data holds the chip's
 *	 intel_device_info
 *
 * Rejects preliminary hardware unless enabled by module parameter,
 * refuses the phantom function-1 device on early chips, and registers
 * the device with the DRM core.  Returns 0 or a negative error code.
 */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/* AGP is not used by this driver; clear the flag before registering. */
	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}
    836 
/* PCI unbind callback: release the DRM device attached to @pdev. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
    844 
    845 static int i915_pm_suspend(struct device *dev)
    846 {
    847 	struct pci_dev *pdev = to_pci_dev(dev);
    848 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
    849 
    850 	if (!drm_dev || !drm_dev->dev_private) {
    851 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
    852 		return -ENODEV;
    853 	}
    854 
    855 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    856 		return 0;
    857 
    858 	return i915_drm_freeze(drm_dev);
    859 }
    860 
/*
 * i915_pm_suspend_late - dev_pm_ops .suspend_late hook
 * @dev: generic device backed by our PCI device
 *
 * Powers the PCI device down to D3hot as the last suspend step, unless
 * the device is already switched off.  Always returns 0.
 */
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
    883 
/* dev_pm_ops .resume_early hook: forward to the early resume path. */
static int i915_pm_resume_early(struct device *dev)
{
	return i915_resume_early(pci_get_drvdata(to_pci_dev(dev)));
}
    891 
/* dev_pm_ops .resume hook: forward to the main resume path. */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
    899 
    900 static int i915_pm_freeze(struct device *dev)
    901 {
    902 	struct pci_dev *pdev = to_pci_dev(dev);
    903 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
    904 
    905 	if (!drm_dev || !drm_dev->dev_private) {
    906 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
    907 		return -ENODEV;
    908 	}
    909 
    910 	return i915_drm_freeze(drm_dev);
    911 }
    912 
/*
 * Early hibernation thaw (.thaw_early) hook: forwards to
 * i915_drm_thaw_early() on the drm_device bound to this PCI device.
 */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_drm_thaw_early(pci_get_drvdata(to_pci_dev(dev)));
}
    920 
/*
 * Hibernation thaw (.thaw) hook: forwards to i915_drm_thaw() on the
 * drm_device bound to this PCI device.
 */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
    928 
    929 static int i915_pm_poweroff(struct device *dev)
    930 {
    931 	struct pci_dev *pdev = to_pci_dev(dev);
    932 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
    933 
    934 	return i915_drm_freeze(drm_dev);
    935 }
    936 
/*
 * Runtime PM suspend hook: power down the device while the system as a
 * whole stays running.  Order matters here: mmaps are torn down and the
 * hangcheck timer stopped before the suspended flag is set.
 */
static int i915_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Runtime PM should only ever be enabled on capable platforms. */
	WARN_ON(!HAS_RUNTIME_PM(dev));
	/* All forcewake references must be dropped before powering down. */
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/* Haswell package C8: enter the deep package state if supported. */
	if (HAS_PC8(dev))
		hsw_enable_pc8(dev_priv);

	/* Invalidate userspace GTT mmaps so later faults wake the device. */
	i915_gem_release_all_mmaps(dev_priv);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
    968 
/*
 * Runtime PM resume hook: mirror of i915_runtime_suspend().  The
 * opregion notification and suspended-flag clear happen before PC8 is
 * left, reversing the suspend ordering.
 */
static int i915_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	/* Tell firmware we are back in D0 (see note in runtime_suspend). */
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	/* Leave Haswell package C8 if it was entered on suspend. */
	if (HAS_PC8(dev))
		hsw_disable_pc8(dev_priv);

	DRM_DEBUG_KMS("Device resumed\n");
	return 0;
}
    988 
/*
 * Power-management callbacks wired into i915_pci_driver below.
 * System sleep and hibernation share the resume-side handlers
 * (.restore/.restore_early reuse the resume functions).
 */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = i915_runtime_suspend,
	.runtime_resume = i915_runtime_resume,
};
   1003 
/* VMA callbacks for mmap'd GEM objects (Linux build only). */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
   1009 
/*
 * File operations for the DRM device node; mostly generic DRM helpers,
 * with an i915-specific compat ioctl handler for 32-bit userspace.
 */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
   1023 
   1024 #endif	/* defined(__NetBSD__) */
   1025 
/*
 * Top-level DRM driver description; forward-declared at the top of the
 * file so i915_pci_probe() can reference it.  NetBSD builds swap the
 * Linux vm_ops/fops for uvm ops and disable drm prime.
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
#ifdef __NetBSD__
	.gem_uvm_ops = &i915_gem_uvm_ops,
#else
	.gem_vm_ops = &i915_gem_vm_ops,
#endif

#ifndef __NetBSD__		/* XXX drm prime */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
#endif

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
#ifdef __NetBSD__
	.fops = NULL,
#else
	.fops = &i915_driver_fops,
#endif
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
   1082 
#ifndef __NetBSD__
/* Linux PCI driver binding: probe/remove plus the PM ops table above. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
#endif
   1092 
   1093 #ifndef __NetBSD__
/*
 * Module entry point (Linux build): decide whether to run with KMS,
 * then register the PCI driver.  With UMS disabled and modeset off we
 * return success without registering anything.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	/* Forced VGA text console wins unless modeset was set explicitly. */
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* UMS has no vblank timestamping support. */
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
   1129 
/*
 * Module exit point: unregister the PCI driver, unless i915_init()
 * silently skipped registration (UMS disabled, modeset off).
 */
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}
   1139 
module_init(i915_init);
module_exit(i915_exit);
#endif

/* Module metadata; license string grants GPL-plus-extra rights. */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
   1147