Home | History | Annotate | Line # | Download | only in i915
i915_drv.c revision 1.2.2.1
      1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
      2  */
      3 /*
      4  *
      5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  */
     29 
     30 #include <linux/device.h>
     31 #include <linux/moduleparam.h>
     32 #include <linux/time.h>
     33 #include <drm/drmP.h>
     34 #include <drm/i915_drm.h>
     35 #include "i915_drv.h"
     36 #include "i915_trace.h"
     37 #include "intel_drv.h"
     38 
     39 #include <linux/console.h>
     40 #include <linux/module.h>
     41 #include <drm/drm_crtc_helper.h>
     42 
/* Defined/filled in later in this file; forward-declared so the probe
 * logic below can reference it. */
static struct drm_driver driver;

#ifdef __NetBSD__
/* XXX Kludge to expose this to NetBSD driver attachment goop.  */
struct drm_driver *const i915_drm_driver = &driver;
#endif
/*
 * Default per-pipe/transcoder/DPLL/palette MMIO offsets shared by all
 * the device info tables below (designated-initializer fragment).
 */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
     59 
/*
 * Static per-chipset feature tables (gen2 through gen6).  Each entry is
 * referenced by driver_data in the PCI ID table below and is never
 * modified at run time.
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
    201 
/*
 * Common baseline for the gen7 tables below; individual entries may
 * legally override any of these fields with a later initializer.
 */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1
/*
 * Gen7 (Ivybridge/Valleyview/Haswell) and gen8 (Broadwell) feature
 * tables.  GEN7_FEATURES supplies the common defaults; duplicated
 * designated initializers override them (last one wins, per C99).
 */
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
    288 
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info), 	\
	INTEL_G45_IDS(&intel_g45_info), 	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_D_IDS(&intel_broadwell_d_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0, 0, 0, 0, 0}	/* all-zero terminator */
};
    328 
/* Export the ID table so Linux module tooling can autoload on match. */
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

#ifdef __NetBSD__
/* XXX Kludge to expose this to NetBSD driver attachment goop.  */
const struct pci_device_id *const i915_device_ids = pciidlist;
const size_t i915_n_device_ids = __arraycount(pciidlist);
#endif
    338 
/*
 * intel_detect_pch - identify the Platform Controller Hub paired with the GPU
 * @dev: drm device
 *
 * Scans the PCI ISA-bridge class devices for an Intel part and records
 * the detected PCH type and device ID in dev_priv->pch_type/pch_id for
 * later platform-specific handling.
 */
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				/* NOTE(review): this platform check sits
				 * before the LPT_LP id match below, so any
				 * Intel ISA bridge on a Broadwell system is
				 * forced to LPT LP regardless of id — confirm
				 * against upstream that this is intended. */
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
    408 
/*
 * i915_semaphore_is_enabled - decide whether inter-ring semaphores are used
 * @dev: drm device
 *
 * The i915.semaphores module parameter overrides the default when it is
 * non-negative; otherwise semaphores default to on for gen6+, with
 * known-problematic configurations excluded below.
 */
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	/* No hardware semaphore support before gen6. */
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	/* Explicit user override via module parameter. */
	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
    429 
/*
 * i915_drm_freeze - quiesce the device for suspend/hibernate
 * @dev: drm device
 *
 * Idles GEM, tears down interrupts and CRTCs (KMS only), saves register
 * state and shuts down opregion/uncore.  Returns 0 on success or the
 * negative error from i915_gem_suspend().
 *
 * NOTE(review): on the GEM-suspend error path the intel_runtime_pm_get()
 * reference taken at the top is not dropped — confirm whether the caller
 * compensates before relying on this.
 */
int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

#ifndef __NetBSD__		/* pmf handles this for us.  */
	pci_save_state(dev->pdev);
#endif

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
#ifdef __NetBSD__
			dev_err(pci_dev_dev(dev->pdev),
			    "GEM idle failed, resume might fail\n");
#else
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
#endif
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);
	intel_uncore_fini(dev);

#ifndef __NetBSD__		/* XXX fb */
	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();
#endif

	dev_priv->suspend_count++;

	return 0;
}
    501 
/*
 * i915_suspend - legacy DRM suspend entry point
 * @dev: drm device (may be NULL if DRM never initialized)
 * @state: the PM transition being performed
 *
 * Validates the device, skips prethaw events and switched-off devices,
 * then freezes via i915_drm_freeze() and (Linux only) puts the PCI
 * device into D3hot for a real suspend.
 */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Prethaw is sent before restoring a hibernation image; nothing
	 * for us to do. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

#ifndef __NetBSD__		/* pmf handles this for us.  */
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
#endif

	return 0;
}
    533 
/*
 * intel_console_resume - deferred fbdev console resume
 * @work: the console_resume_work member of drm_i915_private
 *
 * Runs from a workqueue when __i915_drm_thaw() could not take the
 * console lock without blocking; sets fbdev back to the running state.
 * Empty on NetBSD, where the framebuffer is handled elsewhere.
 */
void intel_console_resume(struct work_struct *work)
{
#ifndef __NetBSD__		/* XXX fb */
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
#endif
}
    547 
/*
 * intel_resume_hotplug - re-run output detection after resume
 * @dev: drm device
 *
 * Display configuration may have changed while suspended, so run every
 * encoder's hot_plug hook under the mode_config mutex and then notify
 * userspace via a hotplug uevent.
 */
static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
    565 
/*
 * i915_drm_thaw_early - early resume hardware sanitization
 * @dev: drm device
 *
 * Sanitizes uncore state and re-initializes the power domains before
 * the main thaw path runs.  Always returns 0.
 */
int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_early_sanitize(dev);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
    576 
/*
 * __i915_drm_thaw - main resume/thaw path
 * @dev: drm device
 * @restore_gtt_mappings: rewrite GTT PTEs (needed when the BIOS may have
 *	clobbered them; see i915_resume())
 *
 * Restores saved register state, re-initializes GEM hardware, modeset
 * state and hotplug handling, then brings fbcon back.
 *
 * NOTE(review): an error from i915_gem_init_hw() does not abort the
 * sequence — it is only returned at the end, after modeset/hotplug
 * re-init.  Confirm this ordering is intentional before changing it.
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

#ifndef __NetBSD__		/* XXX fb */
	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}
#endif

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_runtime_pm_put(dev_priv);
	return error;
}
    646 
    647 int i915_drm_thaw(struct drm_device *dev)
    648 {
    649 	if (drm_core_check_feature(dev, DRIVER_MODESET))
    650 		i915_check_and_clear_faults(dev);
    651 
    652 	return __i915_drm_thaw(dev, true);
    653 }
    654 
/*
 * i915_resume_early - first phase of resume
 * @dev: drm device
 *
 * Re-enables the PCI device (Linux only), restores bus mastering and
 * performs early hardware sanitization via i915_drm_thaw_early().
 */
static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

#ifndef __NetBSD__		/* pmf handles this for us.  */
	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;
#endif

	/* XXX pmf probably handles this for us too.  */
	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}
    679 
/*
 * i915_resume - second phase of resume
 * @dev: drm device
 *
 * Runs the main thaw path and re-enables connector polling.  Returns 0
 * on success or the negative error from __i915_drm_thaw().
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
    697 
/*
 * i915_resume_legacy - non-split resume entry point
 * @dev: drm device
 *
 * Runs the early and main resume phases back to back.  Unlike the
 * previous version, failures are propagated to the caller instead of
 * being silently discarded (the old code always returned 0 even when
 * resume failed).
 */
static int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	ret = i915_resume_early(dev);
	if (ret)
		return ret;

	return i915_resume(dev);
}
    705 
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	/* GPU reset disabled via the i915.reset module parameter. */
	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Non-zero stop_rings means this hang was injected for testing
	 * rather than detected on real hardware. */
	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		/* struct_mutex dropped here; re-taken below only around the
		 * powersave re-enable. */
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		drm_irq_uninstall(dev);
		drm_irq_install(dev);

		/* rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of drm irq. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset. */
		if (INTEL_INFO(dev)->gen > 5) {
			mutex_lock(&dev->struct_mutex);
			intel_enable_gt_powersave(dev);
			mutex_unlock(&dev->struct_mutex);
		}

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
    800 
    801 #ifdef __NetBSD__
    802 
/*
 * UVM pager ops used for GEM object mappings on NetBSD: reference and
 * detach delegate to the generic DRM GEM pager; faults are serviced by
 * the i915 GEM fault handler.
 */
static const struct uvm_pagerops i915_gem_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = i915_gem_fault,
};
    808 
    809 #else
    810 
/*
 * i915_pci_probe - Linux PCI bind callback
 * @pdev: PCI device being bound
 * @ent: matched entry from pciidlist; its driver_data points at the
 *	intel_device_info table for this chip
 *
 * Rejects preliminary hardware (unless overridden by modparam) and any
 * non-zero PCI function, then hands off to the generic DRM PCI probe.
 */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/* Clear the AGP feature flag before registering the device. */
	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}
    834 
/* PCI unbind callback: tear down the drm_device attached at probe. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
    842 
    843 static int i915_pm_suspend(struct device *dev)
    844 {
    845 	struct pci_dev *pdev = to_pci_dev(dev);
    846 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
    847 
    848 	if (!drm_dev || !drm_dev->dev_private) {
    849 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
    850 		return -ENODEV;
    851 	}
    852 
    853 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    854 		return 0;
    855 
    856 	return i915_drm_freeze(drm_dev);
    857 }
    858 
/*
 * i915_pm_suspend_late - dev_pm_ops suspend_late callback
 * @dev: generic device; resolved to our drm_device via drvdata
 *
 * Performs the actual PCI power-down after every other suspend hook has
 * run; skipped entirely for switched-off devices.
 */
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
    881 
/* dev_pm_ops resume_early shim onto the DRM-level early resume. */
static int i915_pm_resume_early(struct device *dev)
{
	return i915_resume_early(pci_get_drvdata(to_pci_dev(dev)));
}
    889 
/* dev_pm_ops resume shim onto the DRM-level main resume. */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
    897 
    898 static int i915_pm_freeze(struct device *dev)
    899 {
    900 	struct pci_dev *pdev = to_pci_dev(dev);
    901 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
    902 
    903 	if (!drm_dev || !drm_dev->dev_private) {
    904 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
    905 		return -ENODEV;
    906 	}
    907 
    908 	return i915_drm_freeze(drm_dev);
    909 }
    910 
/* Early thaw phase after hibernation image creation/restore. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_drm_thaw_early(pci_get_drvdata(to_pci_dev(dev)));
}
    918 
/* Thaw phase after hibernation image creation/restore. */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
    926 
/* Hibernation poweroff: reuses the common freeze path unchanged. */
static int i915_pm_poweroff(struct device *dev)
{
	return i915_drm_freeze(pci_get_drvdata(to_pci_dev(dev)));
}
    934 
/*
 * Runtime-PM suspend callback: powers the GPU down while the rest of the
 * system stays up.  Statement order matters here; see inline comments.
 */
static int i915_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Only hardware with runtime PM support should ever get here. */
	WARN_ON(!HAS_RUNTIME_PM(dev));
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/* Haswell: enter the PC8+ low-power state. */
	if (HAS_PC8(dev))
		hsw_enable_pc8(dev_priv);

	/* Tear down all userspace GEM mmaps before the device loses power. */
	i915_gem_release_all_mmaps(dev_priv);

	/* Stop the GPU hangcheck timer; the GPU is about to power down. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
    966 
/*
 * Runtime-PM resume callback: reverses i915_runtime_suspend() when the
 * device is needed again.
 */
static int i915_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	/* Notify opregion firmware we are back at full power (D0). */
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	/* Haswell: leave the PC8+ low-power state. */
	if (HAS_PC8(dev))
		hsw_disable_pc8(dev_priv);

	DRM_DEBUG_KMS("Device resumed\n");
	return 0;
}
    986 
/*
 * Power-management dispatch table, installed via i915_pci_driver.driver.pm.
 * Used for the modesetting (KMS) path; the legacy .suspend/.resume hooks in
 * struct drm_driver cover non-DRIVER_MODESET configurations.
 */
static const struct dev_pm_ops i915_pm_ops = {
	/* System suspend/resume. */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	/* Hibernation: freeze/thaw around image creation, then restore. */
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	/* Runtime PM: power the GPU down while the system stays up. */
	.runtime_suspend = i915_runtime_suspend,
	.runtime_resume = i915_runtime_resume,
};
   1001 
/* VMA operations for userspace mappings of GEM objects. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,	/* i915-specific fault handler */
	.open = drm_gem_vm_open,	/* generic GEM refcounting */
	.close = drm_gem_vm_close,
};
   1007 
/*
 * File operations for the DRM device node; all generic DRM entry points
 * except the i915-specific compat ioctl for 32-bit userspace.
 */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
   1021 
   1022 #endif	/* defined(__NetBSD__) */
   1023 
/*
 * Top-level DRM driver description for i915: feature flags, load/unload and
 * per-file lifecycle hooks, GEM/PRIME support and dumb-buffer entry points.
 * NetBSD uses its own mmap/uvm hooks in place of the Linux vm_ops/fops.
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
#ifdef __NetBSD__
	/* XXX Not clear the `or legacy' part is important here.  */
	.mmap_object = &drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &i915_gem_uvm_ops,
#else
	.gem_vm_ops = &i915_gem_vm_ops,
#endif

#ifndef __NetBSD__		/* XXX drm prime */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
#endif

	/* Dumb-buffer (unaccelerated KMS scanout) support. */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
#ifdef __NetBSD__
	.fops = NULL,
#else
	.fops = &i915_driver_fops,
#endif
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
   1082 
#ifndef __NetBSD__
/* PCI glue binding the supported device IDs to probe/remove and PM ops. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
#endif
   1092 
   1093 #ifndef __NetBSD__
/*
 * Module entry point: decide whether to run with modesetting (KMS) based
 * on the kernel config, the i915.modeset parameter and the VGA console,
 * then register the PCI driver.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	/* Forced VGA text console wins when modeset was left at auto (-1). */
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
   1129 
/*
 * Module exit point: unregister the PCI driver, unless i915_init() bailed
 * out early (UMS disabled and KMS never enabled).
 */
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}
   1139 
   1140 module_init(i915_init);
   1141 module_exit(i915_exit);
   1142 #endif
   1143 
/* Module metadata reported by modinfo. */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
   1147