      1 /*	$NetBSD: i915_drv.c,v 1.1.1.3 2018/08/27 01:34:53 riastradh Exp $	*/
      2 
      3 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
      4  */
      5 /*
      6  *
      7  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      8  * All Rights Reserved.
      9  *
     10  * Permission is hereby granted, free of charge, to any person obtaining a
     11  * copy of this software and associated documentation files (the
     12  * "Software"), to deal in the Software without restriction, including
     13  * without limitation the rights to use, copy, modify, merge, publish,
     14  * distribute, sub license, and/or sell copies of the Software, and to
     15  * permit persons to whom the Software is furnished to do so, subject to
     16  * the following conditions:
     17  *
     18  * The above copyright notice and this permission notice (including the
     19  * next paragraph) shall be included in all copies or substantial portions
     20  * of the Software.
     21  *
     22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     23  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     25  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     26  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     27  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     28  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     29  *
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: i915_drv.c,v 1.1.1.3 2018/08/27 01:34:53 riastradh Exp $");
     34 
     35 #include <linux/device.h>
     36 #include <linux/acpi.h>
     37 #include <drm/drmP.h>
     38 #include <drm/i915_drm.h>
     39 #include "i915_drv.h"
     40 #include "i915_trace.h"
     41 #include "intel_drv.h"
     42 
     43 #include <linux/console.h>
     44 #include <linux/module.h>
     45 #include <linux/pm_runtime.h>
     46 #include <drm/drm_crtc_helper.h>
     47 
     48 static struct drm_driver driver;
     49 
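        /*
         * Display MMIO layout helpers: these macros fill in the pipe,
         * transcoder, palette and cursor register offset arrays shared by
         * the intel_device_info structs below, so each platform entry only
         * has to spell out what actually differs.
         */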
     50 #define GEN_DEFAULT_PIPEOFFSETS \
     51 	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
     52 			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
     53 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
     54 			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
     55 	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
     56 
     57 #define GEN_CHV_PIPEOFFSETS \
     58 	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
     59 			  CHV_PIPE_C_OFFSET }, \
     60 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
     61 			   CHV_TRANSCODER_C_OFFSET, }, \
     62 	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
     63 			     CHV_PALETTE_C_OFFSET }
     64 
     65 #define CURSOR_OFFSETS \
     66 	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
     67 
     68 #define IVB_CURSOR_OFFSETS \
     69 	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
     70 
     71 static const struct intel_device_info intel_i830_info = {
     72 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
     73 	.has_overlay = 1, .overlay_needs_physical = 1,
     74 	.ring_mask = RENDER_RING,
     75 	GEN_DEFAULT_PIPEOFFSETS,
     76 	CURSOR_OFFSETS,
     77 };
     78 
     79 static const struct intel_device_info intel_845g_info = {
     80 	.gen = 2, .num_pipes = 1,
     81 	.has_overlay = 1, .overlay_needs_physical = 1,
     82 	.ring_mask = RENDER_RING,
     83 	GEN_DEFAULT_PIPEOFFSETS,
     84 	CURSOR_OFFSETS,
     85 };
     86 
     87 static const struct intel_device_info intel_i85x_info = {
     88 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
     89 	.cursor_needs_physical = 1,
     90 	.has_overlay = 1, .overlay_needs_physical = 1,
     91 	.has_fbc = 1,
     92 	.ring_mask = RENDER_RING,
     93 	GEN_DEFAULT_PIPEOFFSETS,
     94 	CURSOR_OFFSETS,
     95 };
     96 
     97 static const struct intel_device_info intel_i865g_info = {
     98 	.gen = 2, .num_pipes = 1,
     99 	.has_overlay = 1, .overlay_needs_physical = 1,
    100 	.ring_mask = RENDER_RING,
    101 	GEN_DEFAULT_PIPEOFFSETS,
    102 	CURSOR_OFFSETS,
    103 };
    104 
    105 static const struct intel_device_info intel_i915g_info = {
    106 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
    107 	.has_overlay = 1, .overlay_needs_physical = 1,
    108 	.ring_mask = RENDER_RING,
    109 	GEN_DEFAULT_PIPEOFFSETS,
    110 	CURSOR_OFFSETS,
    111 };
    112 static const struct intel_device_info intel_i915gm_info = {
    113 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
    114 	.cursor_needs_physical = 1,
    115 	.has_overlay = 1, .overlay_needs_physical = 1,
    116 	.supports_tv = 1,
    117 	.has_fbc = 1,
    118 	.ring_mask = RENDER_RING,
    119 	GEN_DEFAULT_PIPEOFFSETS,
    120 	CURSOR_OFFSETS,
    121 };
    122 static const struct intel_device_info intel_i945g_info = {
    123 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
    124 	.has_overlay = 1, .overlay_needs_physical = 1,
    125 	.ring_mask = RENDER_RING,
    126 	GEN_DEFAULT_PIPEOFFSETS,
    127 	CURSOR_OFFSETS,
    128 };
    129 static const struct intel_device_info intel_i945gm_info = {
    130 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
    131 	.has_hotplug = 1, .cursor_needs_physical = 1,
    132 	.has_overlay = 1, .overlay_needs_physical = 1,
    133 	.supports_tv = 1,
    134 	.has_fbc = 1,
    135 	.ring_mask = RENDER_RING,
    136 	GEN_DEFAULT_PIPEOFFSETS,
    137 	CURSOR_OFFSETS,
    138 };
    139 
    140 static const struct intel_device_info intel_i965g_info = {
    141 	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
    142 	.has_hotplug = 1,
    143 	.has_overlay = 1,
    144 	.ring_mask = RENDER_RING,
    145 	GEN_DEFAULT_PIPEOFFSETS,
    146 	CURSOR_OFFSETS,
    147 };
    148 
    149 static const struct intel_device_info intel_i965gm_info = {
    150 	.gen = 4, .is_crestline = 1, .num_pipes = 2,
    151 	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
    152 	.has_overlay = 1,
    153 	.supports_tv = 1,
    154 	.ring_mask = RENDER_RING,
    155 	GEN_DEFAULT_PIPEOFFSETS,
    156 	CURSOR_OFFSETS,
    157 };
    158 
    159 static const struct intel_device_info intel_g33_info = {
    160 	.gen = 3, .is_g33 = 1, .num_pipes = 2,
    161 	.need_gfx_hws = 1, .has_hotplug = 1,
    162 	.has_overlay = 1,
    163 	.ring_mask = RENDER_RING,
    164 	GEN_DEFAULT_PIPEOFFSETS,
    165 	CURSOR_OFFSETS,
    166 };
    167 
    168 static const struct intel_device_info intel_g45_info = {
    169 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
    170 	.has_pipe_cxsr = 1, .has_hotplug = 1,
    171 	.ring_mask = RENDER_RING | BSD_RING,
    172 	GEN_DEFAULT_PIPEOFFSETS,
    173 	CURSOR_OFFSETS,
    174 };
    175 
    176 static const struct intel_device_info intel_gm45_info = {
    177 	.gen = 4, .is_g4x = 1, .num_pipes = 2,
    178 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
    179 	.has_pipe_cxsr = 1, .has_hotplug = 1,
    180 	.supports_tv = 1,
    181 	.ring_mask = RENDER_RING | BSD_RING,
    182 	GEN_DEFAULT_PIPEOFFSETS,
    183 	CURSOR_OFFSETS,
    184 };
    185 
    186 static const struct intel_device_info intel_pineview_info = {
    187 	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
    188 	.need_gfx_hws = 1, .has_hotplug = 1,
    189 	.has_overlay = 1,
    190 	GEN_DEFAULT_PIPEOFFSETS,
    191 	CURSOR_OFFSETS,
    192 };
    193 
    194 static const struct intel_device_info intel_ironlake_d_info = {
    195 	.gen = 5, .num_pipes = 2,
    196 	.need_gfx_hws = 1, .has_hotplug = 1,
    197 	.ring_mask = RENDER_RING | BSD_RING,
    198 	GEN_DEFAULT_PIPEOFFSETS,
    199 	CURSOR_OFFSETS,
    200 };
    201 
    202 static const struct intel_device_info intel_ironlake_m_info = {
    203 	.gen = 5, .is_mobile = 1, .num_pipes = 2,
    204 	.need_gfx_hws = 1, .has_hotplug = 1,
    205 	.has_fbc = 1,
    206 	.ring_mask = RENDER_RING | BSD_RING,
    207 	GEN_DEFAULT_PIPEOFFSETS,
    208 	CURSOR_OFFSETS,
    209 };
    210 
    211 static const struct intel_device_info intel_sandybridge_d_info = {
    212 	.gen = 6, .num_pipes = 2,
    213 	.need_gfx_hws = 1, .has_hotplug = 1,
    214 	.has_fbc = 1,
    215 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
    216 	.has_llc = 1,
    217 	GEN_DEFAULT_PIPEOFFSETS,
    218 	CURSOR_OFFSETS,
    219 };
    220 
    221 static const struct intel_device_info intel_sandybridge_m_info = {
    222 	.gen = 6, .is_mobile = 1, .num_pipes = 2,
    223 	.need_gfx_hws = 1, .has_hotplug = 1,
    224 	.has_fbc = 1,
    225 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
    226 	.has_llc = 1,
    227 	GEN_DEFAULT_PIPEOFFSETS,
    228 	CURSOR_OFFSETS,
    229 };
    230 
    231 #define GEN7_FEATURES  \
    232 	.gen = 7, .num_pipes = 3, \
    233 	.need_gfx_hws = 1, .has_hotplug = 1, \
    234 	.has_fbc = 1, \
    235 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
    236 	.has_llc = 1
    237 
    238 static const struct intel_device_info intel_ivybridge_d_info = {
    239 	GEN7_FEATURES,
    240 	.is_ivybridge = 1,
    241 	GEN_DEFAULT_PIPEOFFSETS,
    242 	IVB_CURSOR_OFFSETS,
    243 };
    244 
    245 static const struct intel_device_info intel_ivybridge_m_info = {
    246 	GEN7_FEATURES,
    247 	.is_ivybridge = 1,
    248 	.is_mobile = 1,
    249 	GEN_DEFAULT_PIPEOFFSETS,
    250 	IVB_CURSOR_OFFSETS,
    251 };
    252 
    253 static const struct intel_device_info intel_ivybridge_q_info = {
    254 	GEN7_FEATURES,
    255 	.is_ivybridge = 1,
    256 	.num_pipes = 0, /* legal, last one wins */
    257 	GEN_DEFAULT_PIPEOFFSETS,
    258 	IVB_CURSOR_OFFSETS,
    259 };
    260 
    261 static const struct intel_device_info intel_valleyview_m_info = {
    262 	GEN7_FEATURES,
    263 	.is_mobile = 1,
    264 	.num_pipes = 2,
    265 	.is_valleyview = 1,
    266 	.display_mmio_offset = VLV_DISPLAY_BASE,
    267 	.has_fbc = 0, /* legal, last one wins */
    268 	.has_llc = 0, /* legal, last one wins */
    269 	GEN_DEFAULT_PIPEOFFSETS,
    270 	CURSOR_OFFSETS,
    271 };
    272 
    273 static const struct intel_device_info intel_valleyview_d_info = {
    274 	GEN7_FEATURES,
    275 	.num_pipes = 2,
    276 	.is_valleyview = 1,
    277 	.display_mmio_offset = VLV_DISPLAY_BASE,
    278 	.has_fbc = 0, /* legal, last one wins */
    279 	.has_llc = 0, /* legal, last one wins */
    280 	GEN_DEFAULT_PIPEOFFSETS,
    281 	CURSOR_OFFSETS,
    282 };
    283 
    284 static const struct intel_device_info intel_haswell_d_info = {
    285 	GEN7_FEATURES,
    286 	.is_haswell = 1,
    287 	.has_ddi = 1,
    288 	.has_fpga_dbg = 1,
    289 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    290 	GEN_DEFAULT_PIPEOFFSETS,
    291 	IVB_CURSOR_OFFSETS,
    292 };
    293 
    294 static const struct intel_device_info intel_haswell_m_info = {
    295 	GEN7_FEATURES,
    296 	.is_haswell = 1,
    297 	.is_mobile = 1,
    298 	.has_ddi = 1,
    299 	.has_fpga_dbg = 1,
    300 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    301 	GEN_DEFAULT_PIPEOFFSETS,
    302 	IVB_CURSOR_OFFSETS,
    303 };
    304 
    305 static const struct intel_device_info intel_broadwell_d_info = {
    306 	.gen = 8, .num_pipes = 3,
    307 	.need_gfx_hws = 1, .has_hotplug = 1,
    308 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    309 	.has_llc = 1,
    310 	.has_ddi = 1,
    311 	.has_fpga_dbg = 1,
    312 	.has_fbc = 1,
    313 	GEN_DEFAULT_PIPEOFFSETS,
    314 	IVB_CURSOR_OFFSETS,
    315 };
    316 
    317 static const struct intel_device_info intel_broadwell_m_info = {
    318 	.gen = 8, .is_mobile = 1, .num_pipes = 3,
    319 	.need_gfx_hws = 1, .has_hotplug = 1,
    320 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    321 	.has_llc = 1,
    322 	.has_ddi = 1,
    323 	.has_fpga_dbg = 1,
    324 	.has_fbc = 1,
    325 	GEN_DEFAULT_PIPEOFFSETS,
    326 	IVB_CURSOR_OFFSETS,
    327 };
    328 
    329 static const struct intel_device_info intel_broadwell_gt3d_info = {
    330 	.gen = 8, .num_pipes = 3,
    331 	.need_gfx_hws = 1, .has_hotplug = 1,
    332 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
    333 	.has_llc = 1,
    334 	.has_ddi = 1,
    335 	.has_fpga_dbg = 1,
    336 	.has_fbc = 1,
    337 	GEN_DEFAULT_PIPEOFFSETS,
    338 	IVB_CURSOR_OFFSETS,
    339 };
    340 
    341 static const struct intel_device_info intel_broadwell_gt3m_info = {
    342 	.gen = 8, .is_mobile = 1, .num_pipes = 3,
    343 	.need_gfx_hws = 1, .has_hotplug = 1,
    344 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
    345 	.has_llc = 1,
    346 	.has_ddi = 1,
    347 	.has_fpga_dbg = 1,
    348 	.has_fbc = 1,
    349 	GEN_DEFAULT_PIPEOFFSETS,
    350 	IVB_CURSOR_OFFSETS,
    351 };
    352 
    353 static const struct intel_device_info intel_cherryview_info = {
    354 	.gen = 8, .num_pipes = 3,
    355 	.need_gfx_hws = 1, .has_hotplug = 1,
    356 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    357 	.is_valleyview = 1,
    358 	.display_mmio_offset = VLV_DISPLAY_BASE,
    359 	GEN_CHV_PIPEOFFSETS,
    360 	CURSOR_OFFSETS,
    361 };
    362 
    363 static const struct intel_device_info intel_skylake_info = {
    364 	.is_skylake = 1,
    365 	.gen = 9, .num_pipes = 3,
    366 	.need_gfx_hws = 1, .has_hotplug = 1,
    367 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    368 	.has_llc = 1,
    369 	.has_ddi = 1,
    370 	.has_fpga_dbg = 1,
    371 	.has_fbc = 1,
    372 	GEN_DEFAULT_PIPEOFFSETS,
    373 	IVB_CURSOR_OFFSETS,
    374 };
    375 
    376 static const struct intel_device_info intel_skylake_gt3_info = {
    377 	.is_skylake = 1,
    378 	.gen = 9, .num_pipes = 3,
    379 	.need_gfx_hws = 1, .has_hotplug = 1,
    380 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
    381 	.has_llc = 1,
    382 	.has_ddi = 1,
    383 	.has_fpga_dbg = 1,
    384 	.has_fbc = 1,
    385 	GEN_DEFAULT_PIPEOFFSETS,
    386 	IVB_CURSOR_OFFSETS,
    387 };
    388 
    389 static const struct intel_device_info intel_broxton_info = {
    390 	.is_preliminary = 1,
    391 	.gen = 9,
    392 	.need_gfx_hws = 1, .has_hotplug = 1,
    393 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
    394 	.num_pipes = 3,
    395 	.has_ddi = 1,
    396 	.has_fpga_dbg = 1,
    397 	.has_fbc = 1,
    398 	GEN_DEFAULT_PIPEOFFSETS,
    399 	IVB_CURSOR_OFFSETS,
    400 };
    401 
    402 /*
    403  * Make sure all device matches here are ordered from most specific to
    404  * most general.  For example, since the Quanta match is based on the
    405  * subsystem and subvendor IDs, it must come before the more general
    406  * IVB PCI ID matches, otherwise we'll use the wrong info struct above.
    407  */
    408 #define INTEL_PCI_IDS \
    409 	INTEL_I830_IDS(&intel_i830_info),	\
    410 	INTEL_I845G_IDS(&intel_845g_info),	\
    411 	INTEL_I85X_IDS(&intel_i85x_info),	\
    412 	INTEL_I865G_IDS(&intel_i865g_info),	\
    413 	INTEL_I915G_IDS(&intel_i915g_info),	\
    414 	INTEL_I915GM_IDS(&intel_i915gm_info),	\
    415 	INTEL_I945G_IDS(&intel_i945g_info),	\
    416 	INTEL_I945GM_IDS(&intel_i945gm_info),	\
    417 	INTEL_I965G_IDS(&intel_i965g_info),	\
    418 	INTEL_G33_IDS(&intel_g33_info),		\
    419 	INTEL_I965GM_IDS(&intel_i965gm_info),	\
    420 	INTEL_GM45_IDS(&intel_gm45_info), 	\
    421 	INTEL_G45_IDS(&intel_g45_info), 	\
    422 	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
    423 	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
    424 	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
    425 	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
    426 	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
    427 	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
    428 	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
    429 	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
    430 	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
    431 	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
    432 	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
    433 	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
    434 	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
    435 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
    436 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
    437 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
    438 	INTEL_CHV_IDS(&intel_cherryview_info),	\
    439 	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
    440 	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
    441 	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
    442 	INTEL_BXT_IDS(&intel_broxton_info)
    443 
    444 static const struct pci_device_id pciidlist[] = {		/* aka */
    445 	INTEL_PCI_IDS,
    446 	{0, 0, 0}
    447 };
    448 
    449 MODULE_DEVICE_TABLE(pci, pciidlist);
    450 
    451 static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
    452 {
    453 	enum intel_pch ret = PCH_NOP;
    454 
    455 	/*
    456 	 * In a virtualized passthrough environment we can be in a
    457 	 * setup where the ISA bridge cannot be passed through.
    458 	 * In this case, a south bridge can be emulated and we have to
    459 	 * make an educated guess as to which PCH is really there.
    460 	 */
    461 
    462 	if (IS_GEN5(dev)) {
    463 		ret = PCH_IBX;
    464 		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
    465 	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
    466 		ret = PCH_CPT;
    467 		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
    468 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
    469 		ret = PCH_LPT;
    470 		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
    471 	} else if (IS_SKYLAKE(dev)) {
    472 		ret = PCH_SPT;
    473 		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
    474 	}
    475 
    476 	return ret;
    477 }
    478 
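        /*
         * Identify the PCH (south bridge) by scanning the Intel ISA bridges
         * on the bus and record its type and device ID in dev_priv.  Falls
         * back to intel_virt_detect_pch() for emulated south bridges, and
         * to PCH_NOP when there are no display pipes.
         */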
    479 void intel_detect_pch(struct drm_device *dev)
    480 {
    481 	struct drm_i915_private *dev_priv = dev->dev_private;
    482 	struct pci_dev *pch = NULL;
    483 
    484 	/* Having no display pipes (num_pipes == 0) is equivalent to the
    485 	 * PCH_NOP setting (which really amounts to a PCH but no South Display).
    486 	 */
    487 	if (INTEL_INFO(dev)->num_pipes == 0) {
    488 		dev_priv->pch_type = PCH_NOP;
    489 		return;
    490 	}
    491 
    492 	/*
    493 	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
    494 	 * make graphics device passthrough easy for the VMM, which then
    495 	 * only needs to expose the ISA bridge to let the driver know the
    496 	 * real hardware underneath. This is a requirement from the
    497 	 * virtualization team.
    498 	 *
    499 	 * In some virtualized environments (e.g. XEN), there may be an
    500 	 * irrelevant ISA bridge in the system. To work reliably, we should
    501 	 * scan through all ISA bridges and use the first match.
    502 	 */
    503 	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
    504 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
    505 			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
    506 			dev_priv->pch_id = id;
    507 
    508 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
    509 				dev_priv->pch_type = PCH_IBX;
    510 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
    511 				WARN_ON(!IS_GEN5(dev));
    512 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
    513 				dev_priv->pch_type = PCH_CPT;
    514 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
    515 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
    516 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
    517 				/* PantherPoint is CPT compatible */
    518 				dev_priv->pch_type = PCH_CPT;
    519 				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
    520 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
    521 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
    522 				dev_priv->pch_type = PCH_LPT;
    523 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
    524 				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
    525 				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
    526 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
    527 				dev_priv->pch_type = PCH_LPT;
    528 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
    529 				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
    530 				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
    531 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
    532 				dev_priv->pch_type = PCH_SPT;
    533 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
    534 				WARN_ON(!IS_SKYLAKE(dev));
    535 			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
    536 				dev_priv->pch_type = PCH_SPT;
    537 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
    538 				WARN_ON(!IS_SKYLAKE(dev));
    539 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
    540 				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
    541 				    pch->subsystem_vendor == 0x1af4 &&
    542 				    pch->subsystem_device == 0x1100)) {
    543 				dev_priv->pch_type = intel_virt_detect_pch(dev);
    544 			} else
    545 				continue;
    546 
    547 			break;
    548 		}
    549 	}
    550 	if (!pch)
    551 		DRM_DEBUG_KMS("No PCH found.\n");
    552 
    553 	pci_dev_put(pch);
    554 }
    555 
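        /*
         * Decide whether inter-ring semaphores may be used.  They are never
         * available before GEN6; beyond that the i915.semaphores module
         * parameter wins if set, and by default they are also disabled
         * together with execlists, on GEN8, and on SNB when the graphics
         * IOMMU mapping is active.
         */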
    556 bool i915_semaphore_is_enabled(struct drm_device *dev)
    557 {
    558 	if (INTEL_INFO(dev)->gen < 6)
    559 		return false;
    560 
    561 	if (i915.semaphores >= 0)
    562 		return i915.semaphores;
    563 
    564 	/* TODO: make semaphores and Execlists play nicely together */
    565 	if (i915.enable_execlists)
    566 		return false;
    567 
    568 	/* Until we get further testing... */
    569 	if (IS_GEN8(dev))
    570 		return false;
    571 
    572 #ifdef CONFIG_INTEL_IOMMU
    573 	/* Enable semaphores on SNB when IO remapping is off */
    574 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
    575 		return false;
    576 #endif
    577 
    578 	return true;
    579 }
    580 
    581 void i915_firmware_load_error_print(const char *fw_path, int err)
    582 {
    583 	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
    584 
    585 	/*
    586 	 * If the reason is not known assume -ENOENT since that's the most
    587 	 * usual failure mode.
    588 	 */
    589 	if (!err)
    590 		err = -ENOENT;
    591 
    592 	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
    593 		return;
    594 
    595 	DRM_ERROR(
    596 	  "The driver is built-in, so to load the firmware you need to\n"
    597 	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
    598 	  "in your initrd/initramfs image.\n");
    599 }
    600 
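        /* Call each encoder's ->suspend() hook under the modeset locks. */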
    601 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
    602 {
    603 	struct drm_device *dev = dev_priv->dev;
    604 	struct drm_encoder *encoder;
    605 
    606 	drm_modeset_lock_all(dev);
    607 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
    608 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
    609 
    610 		if (intel_encoder->suspend)
    611 			intel_encoder->suspend(intel_encoder);
    612 	}
    613 	drm_modeset_unlock_all(dev);
    614 }
    615 
    616 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
    617 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
    618 			      bool rpm_resume);
    619 static int skl_resume_prepare(struct drm_i915_private *dev_priv);
    620 static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
    621 
    622 
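        /*
         * First phase of system suspend: idle GEM and the GuC, shut down
         * the display pipes and encoders, save register state and notify
         * the opregion of the target power state.  The PCI-level power-down
         * happens later, in i915_drm_suspend_late().
         */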
    623 static int i915_drm_suspend(struct drm_device *dev)
    624 {
    625 	struct drm_i915_private *dev_priv = dev->dev_private;
    626 	pci_power_t opregion_target_state;
    627 	int error;
    628 
    629 	/* ignore lid events during suspend */
    630 	mutex_lock(&dev_priv->modeset_restore_lock);
    631 	dev_priv->modeset_restore = MODESET_SUSPENDED;
    632 	mutex_unlock(&dev_priv->modeset_restore_lock);
    633 
    634 	/* We do a lot of poking in a lot of registers, so make sure they
    635 	 * work properly. */
    636 	intel_display_set_init_power(dev_priv, true);
    637 
    638 	drm_kms_helper_poll_disable(dev);
    639 
    640 	pci_save_state(dev->pdev);
    641 
    642 	error = i915_gem_suspend(dev);
    643 	if (error) {
    644 		dev_err(&dev->pdev->dev,
    645 			"GEM idle failed, resume might fail\n");
    646 		return error;
    647 	}
    648 
    649 	intel_guc_suspend(dev);
    650 
    651 	intel_suspend_gt_powersave(dev);
    652 
    653 	/*
    654 	 * Disable CRTCs directly since we want to preserve sw state
    655 	 * for _thaw. Also, power gate the CRTC power wells.
    656 	 */
    657 	drm_modeset_lock_all(dev);
    658 	intel_display_suspend(dev);
    659 	drm_modeset_unlock_all(dev);
    660 
    661 	intel_dp_mst_suspend(dev);
    662 
    663 	intel_runtime_pm_disable_interrupts(dev_priv);
    664 	intel_hpd_cancel_work(dev_priv);
    665 
    666 	intel_suspend_encoders(dev_priv);
    667 
    668 	intel_suspend_hw(dev);
    669 
    670 	i915_gem_suspend_gtt_mappings(dev);
    671 
    672 	i915_save_state(dev);
    673 
    674 	opregion_target_state = PCI_D3cold;
    675 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
    676 	if (acpi_target_system_state() < ACPI_STATE_S3)
    677 		opregion_target_state = PCI_D1;
    678 #endif
    679 	intel_opregion_notify_adapter(dev, opregion_target_state);
    680 
    681 	intel_uncore_forcewake_reset(dev, false);
    682 	intel_opregion_fini(dev);
    683 
    684 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
    685 
    686 	dev_priv->suspend_count++;
    687 
    688 	intel_display_set_init_power(dev_priv, false);
    689 
    690 	return 0;
    691 }
    692 
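        /*
         * Late phase of system suspend: run the platform-specific
         * power-down hooks and put the PCI device into D3hot, modulo the
         * pre-GEN6 hibernation workaround described below.
         */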
    693 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
    694 {
    695 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
    696 	int ret;
    697 
    698 	ret = intel_suspend_complete(dev_priv);
    699 
    700 	if (ret) {
    701 		DRM_ERROR("Suspend complete failed: %d\n", ret);
    702 
    703 		return ret;
    704 	}
    705 
    706 	pci_disable_device(drm_dev->pdev);
    707 	/*
    708 	 * During hibernation on some platforms the BIOS may try to access
    709 	 * the device even though it's already in D3 and hang the machine. So
    710 	 * leave the device in D0 on those platforms and hope the BIOS will
    711 	 * power down the device properly. The issue was seen on multiple old
    712 	 * GENs with different BIOS vendors, so having an explicit blacklist
    713 	 * is impractical; apply the workaround on everything pre-GEN6. The
    714 	 * platforms where the issue was seen:
    715 	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
    716 	 * Fujitsu FSC S7110
    717 	 * Acer Aspire 1830T
    718 	 */
    719 	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
    720 		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
    721 
    722 	return 0;
    723 }
    724 
    725 int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
    726 {
    727 	int error;
    728 
    729 	if (!dev || !dev->dev_private) {
    730 		DRM_ERROR("dev: %p\n", dev);
    731 		DRM_ERROR("DRM not initialized, aborting suspend.\n");
    732 		return -ENODEV;
    733 	}
    734 
    735 	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
    736 			 state.event != PM_EVENT_FREEZE))
    737 		return -EINVAL;
    738 
    739 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    740 		return 0;
    741 
    742 	error = i915_drm_suspend(dev);
    743 	if (error)
    744 		return error;
    745 
    746 	return i915_drm_suspend_late(dev, false);
    747 }
    748 
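        /*
         * Main resume path, undoing i915_drm_suspend(): restore the GTT
         * mappings and saved register state, re-initialize the hardware and
         * the modeset state, and re-enable hotplug and output polling.
         */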
    749 static int i915_drm_resume(struct drm_device *dev)
    750 {
    751 	struct drm_i915_private *dev_priv = dev->dev_private;
    752 
    753 	mutex_lock(&dev->struct_mutex);
    754 	i915_gem_restore_gtt_mappings(dev);
    755 	mutex_unlock(&dev->struct_mutex);
    756 
    757 	i915_restore_state(dev);
    758 	intel_opregion_setup(dev);
    759 
    760 	intel_init_pch_refclk(dev);
    761 	drm_mode_config_reset(dev);
    762 
    763 	/*
    764 	 * Interrupts have to be enabled before any batches are run. If not the
    765 	 * GPU will hang. i915_gem_init_hw() will initiate batches to
    766 	 * update/restore the context.
    767 	 *
    768 	 * Modeset enabling in intel_modeset_init_hw() also needs working
    769 	 * interrupts.
    770 	 */
    771 	intel_runtime_pm_enable_interrupts(dev_priv);
    772 
    773 	mutex_lock(&dev->struct_mutex);
    774 	if (i915_gem_init_hw(dev)) {
    775 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
    776 		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
    777 	}
    778 	mutex_unlock(&dev->struct_mutex);
    779 
    780 	intel_guc_resume(dev);
    781 
    782 	intel_modeset_init_hw(dev);
    783 
    784 	spin_lock_irq(&dev_priv->irq_lock);
    785 	if (dev_priv->display.hpd_irq_setup)
    786 		dev_priv->display.hpd_irq_setup(dev);
    787 	spin_unlock_irq(&dev_priv->irq_lock);
    788 
    789 	drm_modeset_lock_all(dev);
    790 	intel_display_resume(dev);
    791 	drm_modeset_unlock_all(dev);
    792 
    793 	intel_dp_mst_resume(dev);
    794 
    795 	/*
    796 	 * ... but we also need to make sure that hotplug processing
    797 	 * doesn't cause havoc. As in the driver load code, we don't
    798 	 * bother with the tiny race here where we might lose hotplug
    799 	 * notifications.
    800 	 */
    801 	intel_hpd_init(dev_priv);
    802 	/* Config may have changed between suspend and resume */
    803 	drm_helper_hpd_irq_event(dev);
    804 
    805 	intel_opregion_init(dev);
    806 
    807 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
    808 
    809 	mutex_lock(&dev_priv->modeset_restore_lock);
    810 	dev_priv->modeset_restore = MODESET_DONE;
    811 	mutex_unlock(&dev_priv->modeset_restore_lock);
    812 
    813 	intel_opregion_notify_adapter(dev, PCI_D0);
    814 
    815 	drm_kms_helper_poll_enable(dev);
    816 
    817 	return 0;
    818 }
    819 
    820 static int i915_drm_resume_early(struct drm_device *dev)
    821 {
    822 	struct drm_i915_private *dev_priv = dev->dev_private;
    823 	int ret = 0;
    824 
    825 	/*
    826 	 * We have a resume ordering issue with the snd-hda driver also
    827 	 * requiring our device to be powered up. Due to the lack of a
    828 	 * parent/child relationship we currently solve this with an early
    829 	 * resume hook.
    830 	 *
    831 	 * FIXME: This should be solved with a special hdmi sink device or
    832 	 * similar so that power domains can be employed.
    833 	 */
    834 	if (pci_enable_device(dev->pdev))
    835 		return -EIO;
    836 
    837 	pci_set_master(dev->pdev);
    838 
    839 	if (IS_VALLEYVIEW(dev_priv))
    840 		ret = vlv_resume_prepare(dev_priv, false);
    841 	if (ret)
    842 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
    843 			  ret);
    844 
    845 	intel_uncore_early_sanitize(dev, true);
    846 
    847 	if (IS_BROXTON(dev))
    848 		ret = bxt_resume_prepare(dev_priv);
    849 	else if (IS_SKYLAKE(dev_priv))
    850 		ret = skl_resume_prepare(dev_priv);
    851 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
    852 		hsw_disable_pc8(dev_priv);
    853 
    854 	intel_uncore_sanitize(dev);
    855 	intel_power_domains_init_hw(dev_priv);
    856 
    857 	return ret;
    858 }
    859 
    860 int i915_resume_switcheroo(struct drm_device *dev)
    861 {
    862 	int ret;
    863 
    864 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    865 		return 0;
    866 
    867 	ret = i915_drm_resume_early(dev);
    868 	if (ret)
    869 		return ret;
    870 
    871 	return i915_drm_resume(dev);
    872 }
    873 
    874 /**
    875  * i915_reset - reset chip after a hang
    876  * @dev: drm device to reset
    877  *
    878  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
    879  * reset or otherwise an error code.
    880  *
    881  * Procedure is fairly simple:
    882  *   - reset the chip using the reset reg
    883  *   - re-init context state
    884  *   - re-init hardware status page
    885  *   - re-init ring buffer
    886  *   - re-init interrupt state
    887  *   - re-init display
    888  */
    889 int i915_reset(struct drm_device *dev)
    890 {
    891 	struct drm_i915_private *dev_priv = dev->dev_private;
    892 	bool simulated;
    893 	int ret;
    894 
    895 	intel_reset_gt_powersave(dev);
    896 
    897 	mutex_lock(&dev->struct_mutex);
    898 
    899 	i915_gem_reset(dev);
    900 
    901 	simulated = dev_priv->gpu_error.stop_rings != 0;
    902 
    903 	ret = intel_gpu_reset(dev);
    904 
    905 	/* Also reset the gpu hangman. */
    906 	if (simulated) {
    907 		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
    908 		dev_priv->gpu_error.stop_rings = 0;
    909 		if (ret == -ENODEV) {
    910 			DRM_INFO("Reset not implemented, but ignoring "
    911 				 "error for simulated gpu hangs\n");
    912 			ret = 0;
    913 		}
    914 	}
    915 
    916 	if (i915_stop_ring_allow_warn(dev_priv))
    917 		pr_notice("drm/i915: Resetting chip after gpu hang\n");
    918 
    919 	if (ret) {
    920 		DRM_ERROR("Failed to reset chip: %i\n", ret);
    921 		mutex_unlock(&dev->struct_mutex);
    922 		return ret;
    923 	}
    924 
    925 	intel_overlay_reset(dev_priv);
    926 
    927 	/* Ok, now get things going again... */
    928 
    929 	/*
    930 	 * Everything depends on having the GTT running, so we need to start
    931 	 * there.  Fortunately we don't need to do this unless we reset the
    932 	 * chip at a PCI level.
    933 	 *
    934 	 * Next we need to restore the context, but we don't use those
    935 	 * yet either...
    936 	 *
    937 	 * Ring buffer needs to be re-initialized in the KMS case, or if X
    938 	 * was running at the time of the reset (i.e. we weren't VT
    939 	 * switched away).
    940 	 */
    941 
    942 	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
    943 	dev_priv->gpu_error.reload_in_reset = true;
    944 
    945 	ret = i915_gem_init_hw(dev);
    946 
    947 	dev_priv->gpu_error.reload_in_reset = false;
    948 
    949 	mutex_unlock(&dev->struct_mutex);
    950 	if (ret) {
    951 		DRM_ERROR("Failed hw init on reset %d\n", ret);
    952 		return ret;
    953 	}
    954 
    955 	/*
    956 	 * rps/rc6 re-init is necessary to restore state lost after the
    957 	 * reset and the re-install of gt irqs. Skip for ironlake per
    958 	 * previous concerns that it doesn't respond well to some forms
    959 	 * of re-init after reset.
    960 	 */
    961 	if (INTEL_INFO(dev)->gen > 5)
    962 		intel_enable_gt_powersave(dev);
    963 
    964 	return 0;
    965 }
    966 
    967 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    968 {
    969 	struct intel_device_info *intel_info =
    970 		(struct intel_device_info *) ent->driver_data;
    971 
    972 	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
    973 		DRM_INFO("This hardware requires preliminary hardware support.\n"
    974 			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
    975 		return -ENODEV;
    976 	}
    977 
    978 	/* Only bind to function 0 of the device. Early generations
    979 	 * used function 1 as a placeholder for multi-head. This now just
    980 	 * causes confusion, especially on systems where both functions
    981 	 * have the same PCI ID!
    982 	 */
    983 	if (PCI_FUNC(pdev->devfn))
    984 		return -ENODEV;
    985 
    986 	return drm_get_pci_dev(pdev, ent, &driver);
    987 }
    988 
    989 static void
    990 i915_pci_remove(struct pci_dev *pdev)
    991 {
    992 	struct drm_device *dev = pci_get_drvdata(pdev);
    993 
    994 	drm_put_dev(dev);
    995 }
    996 
    997 static int i915_pm_suspend(struct device *dev)
    998 {
    999 	struct pci_dev *pdev = to_pci_dev(dev);
   1000 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
   1001 
   1002 	if (!drm_dev || !drm_dev->dev_private) {
   1003 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
   1004 		return -ENODEV;
   1005 	}
   1006 
   1007 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1008 		return 0;
   1009 
   1010 	return i915_drm_suspend(drm_dev);
   1011 }
   1012 
   1013 static int i915_pm_suspend_late(struct device *dev)
   1014 {
   1015 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
   1016 
   1017 	/*
   1018 	 * We have a suspend ordering issue with the snd-hda driver also
   1019 	 * requiring our device to be powered up. Due to the lack of a
   1020 	 * parent/child relationship we currently solve this with a late
   1021 	 * suspend hook.
   1022 	 *
   1023 	 * FIXME: This should be solved with a special hdmi sink device or
   1024 	 * similar so that power domains can be employed.
   1025 	 */
   1026 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1027 		return 0;
   1028 
   1029 	return i915_drm_suspend_late(drm_dev, false);
   1030 }
   1031 
   1032 static int i915_pm_poweroff_late(struct device *dev)
   1033 {
   1034 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
   1035 
   1036 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1037 		return 0;
   1038 
   1039 	return i915_drm_suspend_late(drm_dev, true);
   1040 }
   1041 
   1042 static int i915_pm_resume_early(struct device *dev)
   1043 {
   1044 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
   1045 
   1046 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1047 		return 0;
   1048 
   1049 	return i915_drm_resume_early(drm_dev);
   1050 }
   1051 
   1052 static int i915_pm_resume(struct device *dev)
   1053 {
   1054 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
   1055 
   1056 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1057 		return 0;
   1058 
   1059 	return i915_drm_resume(drm_dev);
   1060 }
   1061 
   1062 static int skl_suspend_complete(struct drm_i915_private *dev_priv)
   1063 {
   1064 	/* Enabling DC6 is not a hard requirement to enter runtime D3 */
   1065 
   1066 	skl_uninit_cdclk(dev_priv);
   1067 
   1068 	return 0;
   1069 }
   1070 
   1071 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
   1072 {
   1073 	hsw_enable_pc8(dev_priv);
   1074 
   1075 	return 0;
   1076 }
   1077 
   1078 static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
   1079 {
   1080 	struct drm_device *dev = dev_priv->dev;
   1081 
   1082 	/* TODO: when DC5 support is added disable DC5 here. */
   1083 
   1084 	broxton_ddi_phy_uninit(dev);
   1085 	broxton_uninit_cdclk(dev);
   1086 	bxt_enable_dc9(dev_priv);
   1087 
   1088 	return 0;
   1089 }
   1090 
   1091 static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
   1092 {
   1093 	struct drm_device *dev = dev_priv->dev;
   1094 
   1095 	/* TODO: when CSR FW support is added make sure the FW is loaded */
   1096 
   1097 	bxt_disable_dc9(dev_priv);
   1098 
   1099 	/*
   1100 	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
   1101 	 * is available.
   1102 	 */
   1103 	broxton_init_cdclk(dev);
   1104 	broxton_ddi_phy_init(dev);
   1105 	intel_prepare_ddi(dev);
   1106 
   1107 	return 0;
   1108 }
   1109 
   1110 static int skl_resume_prepare(struct drm_i915_private *dev_priv)
   1111 {
   1112 	struct drm_device *dev = dev_priv->dev;
   1113 
   1114 	skl_init_cdclk(dev_priv);
   1115 	intel_csr_load_program(dev);
   1116 
   1117 	return 0;
   1118 }
   1119 
   1120 /*
   1121  * Save all Gunit registers that may be lost after a D3 and a subsequent
   1122  * S0i[R123] transition. The list of registers needing a save/restore is
   1123  * defined in the VLV2_S0IXRegs document. This document marks all Gunit
   1124  * registers in the following way:
   1125  * - Driver: saved/restored by the driver
   1126  * - Punit : saved/restored by the Punit firmware
   1127  * - No, w/o marking: no need to save/restore, since the register is R/O or
   1128  *                    used internally by the HW in a way that doesn't
   1129  *                    depend on keeping the content across a suspend/resume.
   1130  * - Debug : used for debugging
   1131  *
   1132  * We save/restore all registers marked with 'Driver', with the following
   1133  * exceptions:
   1134  * - Registers out of use, including those marked with 'Debug'.
   1135  *   These have no effect on the driver's operation, so we don't save/restore
   1136  *   them to reduce the overhead.
   1137  * - Registers that are fully setup by an initialization function called from
   1138  *   the resume path. For example many clock gating and RPS/RC6 registers.
   1139  * - Registers that provide the right functionality with their reset defaults.
   1140  *
   1141  * TODO: Except for registers that, based on the above 3 criteria, can safely be
   1142  * ignored, we save/restore all others, practically treating the HW context as
   1143  * a black-box for the driver. Further investigation is needed to reduce the
   1144  * saved/restored registers even further, by following the same 3 criteria.
   1145  */
   1146 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
   1147 {
   1148 	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
   1149 	int i;
   1150 
   1151 	/* GAM 0x4000-0x4770 */
   1152 	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
   1153 	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
   1154 	s->arb_mode		= I915_READ(ARB_MODE);
   1155 	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
   1156 	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
   1157 
   1158 	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
   1159 		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
   1160 
   1161 	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
   1162 	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
   1163 
   1164 	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
   1165 	s->ecochk		= I915_READ(GAM_ECOCHK);
   1166 	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
   1167 	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
   1168 
   1169 	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
   1170 
   1171 	/* MBC 0x9024-0x91D0, 0x8500 */
   1172 	s->g3dctl		= I915_READ(VLV_G3DCTL);
   1173 	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
   1174 	s->mbctl		= I915_READ(GEN6_MBCTL);
   1175 
   1176 	/* GCP 0x9400-0x9424, 0x8100-0x810C */
   1177 	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
   1178 	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
   1179 	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
   1180 	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
   1181 	s->rstctl		= I915_READ(GEN6_RSTCTL);
   1182 	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
   1183 
   1184 	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
   1185 	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
   1186 	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
   1187 	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
   1188 	s->ecobus		= I915_READ(ECOBUS);
   1189 	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
   1190 	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
   1191 	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
   1192 	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
   1193 	s->rcedata		= I915_READ(VLV_RCEDATA);
   1194 	s->spare2gh		= I915_READ(VLV_SPAREG2H);
   1195 
   1196 	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
   1197 	s->gt_imr		= I915_READ(GTIMR);
   1198 	s->gt_ier		= I915_READ(GTIER);
   1199 	s->pm_imr		= I915_READ(GEN6_PMIMR);
   1200 	s->pm_ier		= I915_READ(GEN6_PMIER);
   1201 
   1202 	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
   1203 		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
   1204 
   1205 	/* GT SA CZ domain, 0x100000-0x138124 */
   1206 	s->tilectl		= I915_READ(TILECTL);
   1207 	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
   1208 	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
   1209 	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
   1210 	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
   1211 
   1212 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
   1213 	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
   1214 	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
   1215 	s->pcbr			= I915_READ(VLV_PCBR);
   1216 	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
   1217 
   1218 	/*
   1219 	 * Not saving any of:
   1220 	 * DFT,		0x9800-0x9EC0
   1221 	 * SARB,	0xB000-0xB1FC
   1222 	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
   1223 	 * PCI CFG
   1224 	 */
   1225 }
   1226 
   1227 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
   1228 {
   1229 	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
   1230 	u32 val;
   1231 	int i;
   1232 
   1233 	/* GAM 0x4000-0x4770 */
   1234 	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
   1235 	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
   1236 	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
   1237 	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
   1238 	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
   1239 
   1240 	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
   1241 		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
   1242 
   1243 	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
   1244 	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
   1245 
   1246 	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
   1247 	I915_WRITE(GAM_ECOCHK,		s->ecochk);
   1248 	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
   1249 	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
   1250 
   1251 	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
   1252 
   1253 	/* MBC 0x9024-0x91D0, 0x8500 */
   1254 	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
   1255 	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
   1256 	I915_WRITE(GEN6_MBCTL,		s->mbctl);
   1257 
   1258 	/* GCP 0x9400-0x9424, 0x8100-0x810C */
   1259 	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
   1260 	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
   1261 	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
   1262 	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
   1263 	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
   1264 	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
   1265 
   1266 	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
   1267 	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
   1268 	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
   1269 	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
   1270 	I915_WRITE(ECOBUS,		s->ecobus);
   1271 	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
   1272 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
   1273 	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
   1274 	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
   1275 	I915_WRITE(VLV_RCEDATA,		s->rcedata);
   1276 	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
   1277 
   1278 	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
   1279 	I915_WRITE(GTIMR,		s->gt_imr);
   1280 	I915_WRITE(GTIER,		s->gt_ier);
   1281 	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
   1282 	I915_WRITE(GEN6_PMIER,		s->pm_ier);
   1283 
   1284 	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
   1285 		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
   1286 
   1287 	/* GT SA CZ domain, 0x100000-0x138124 */
   1288 	I915_WRITE(TILECTL,			s->tilectl);
   1289 	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
   1290 	/*
   1291 	 * Preserve the GT allow wake and GFX force clock bits; they are
   1292 	 * not restored, as they are used to control the s0ix suspend/resume
   1293 	 * sequence by the caller.
   1294 	 */
   1295 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
   1296 	val &= VLV_GTLC_ALLOWWAKEREQ;
   1297 	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
   1298 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
   1299 
   1300 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
   1301 	val &= VLV_GFX_CLK_FORCE_ON_BIT;
   1302 	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
   1303 	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
   1304 
   1305 	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
   1306 
   1307 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
   1308 	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
   1309 	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
   1310 	I915_WRITE(VLV_PCBR,			s->pcbr);
   1311 	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
   1312 }
   1313 
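        /*
         * Force the Gunit graphics clock on or off.  When forcing it on,
         * wait up to 20 ms for the clock status bit to assert; the callers
         * below rely on this before touching Gunit registers.
         */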
   1314 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
   1315 {
   1316 	u32 val;
   1317 	int err;
   1318 
   1319 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
   1320 
   1321 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
   1322 	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
   1323 	if (force_on)
   1324 		val |= VLV_GFX_CLK_FORCE_ON_BIT;
   1325 	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
   1326 
   1327 	if (!force_on)
   1328 		return 0;
   1329 
   1330 	err = wait_for(COND, 20);
   1331 	if (err)
   1332 		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
   1333 			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
   1334 
   1335 	return err;
   1336 #undef COND
   1337 }
   1338 
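        /*
         * Allow or disallow GT wake requests, waiting up to 1 ms for the
         * allow-wake acknowledgment to match the requested state.
         */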
   1339 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
   1340 {
   1341 	u32 val;
   1342 	int err = 0;
   1343 
   1344 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
   1345 	val &= ~VLV_GTLC_ALLOWWAKEREQ;
   1346 	if (allow)
   1347 		val |= VLV_GTLC_ALLOWWAKEREQ;
   1348 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
   1349 	POSTING_READ(VLV_GTLC_WAKE_CTRL);
   1350 
   1351 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
   1352 	      allow)
   1353 	err = wait_for(COND, 1);
   1354 	if (err)
   1355 		DRM_ERROR("timeout disabling GT waking\n");
   1356 	return err;
   1357 #undef COND
   1358 }
   1359 
   1360 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
   1361 				 bool wait_for_on)
   1362 {
   1363 	u32 mask;
   1364 	u32 val;
   1365 	int err;
   1366 
   1367 	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
   1368 	val = wait_for_on ? mask : 0;
   1369 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
   1370 	if (COND)
   1371 		return 0;
   1372 
   1373 	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
   1374 			wait_for_on ? "on" : "off",
   1375 			I915_READ(VLV_GTLC_PW_STATUS));
   1376 
   1377 	/*
   1378 	 * RC6 transitioning can be delayed up to 2 msec (see
   1379 	 * valleyview_enable_rps), use 3 msec for safety.
   1380 	 */
   1381 	err = wait_for(COND, 3);
   1382 	if (err)
   1383 		DRM_ERROR("timeout waiting for GT wells to go %s\n",
   1384 			  wait_for_on ? "on" : "off");
   1385 
   1386 	return err;
   1387 #undef COND
   1388 }
   1389 
   1390 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
   1391 {
   1392 	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
   1393 		return;
   1394 
   1395 	DRM_ERROR("GT register access while GT waking disabled\n");
   1396 	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
   1397 }
   1398 
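        /*
         * VLV/CHV suspend sequence: force the gfx clock on, disallow GT
         * wake, save the Gunit s0ix state (not needed on CHV) and release
         * the forced clock again, unwinding the earlier steps on failure.
         */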
   1399 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
   1400 {
   1401 	u32 mask;
   1402 	int err;
   1403 
   1404 	/*
   1405 	 * Bspec defines the following GT "well on" flags as debug only, so
   1406 	 * don't treat them as hard failures.
   1407 	 */
   1408 	(void)vlv_wait_for_gt_wells(dev_priv, false);
   1409 
   1410 	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
   1411 	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
   1412 
   1413 	vlv_check_no_gt_access(dev_priv);
   1414 
   1415 	err = vlv_force_gfx_clock(dev_priv, true);
   1416 	if (err)
   1417 		goto err1;
   1418 
   1419 	err = vlv_allow_gt_wake(dev_priv, false);
   1420 	if (err)
   1421 		goto err2;
   1422 
   1423 	if (!IS_CHERRYVIEW(dev_priv->dev))
   1424 		vlv_save_gunit_s0ix_state(dev_priv);
   1425 
   1426 	err = vlv_force_gfx_clock(dev_priv, false);
   1427 	if (err)
   1428 		goto err2;
   1429 
   1430 	return 0;
   1431 
   1432 err2:
   1433 	/* For safety always re-enable waking and disable gfx clock forcing */
   1434 	vlv_allow_gt_wake(dev_priv, true);
   1435 err1:
   1436 	vlv_force_gfx_clock(dev_priv, false);
   1437 
   1438 	return err;
   1439 }
   1440 
   1441 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
   1442 				bool rpm_resume)
   1443 {
   1444 	struct drm_device *dev = dev_priv->dev;
   1445 	int err;
   1446 	int ret;
   1447 
   1448 	/*
   1449 	 * If any of the steps fail, just try to continue; that's the best we
   1450 	 * can do at this point. Return the first error code (which will also
   1451 	 * leave RPM permanently disabled).
   1452 	 */
   1453 	ret = vlv_force_gfx_clock(dev_priv, true);
   1454 
   1455 	if (!IS_CHERRYVIEW(dev_priv->dev))
   1456 		vlv_restore_gunit_s0ix_state(dev_priv);
   1457 
   1458 	err = vlv_allow_gt_wake(dev_priv, true);
   1459 	if (!ret)
   1460 		ret = err;
   1461 
   1462 	err = vlv_force_gfx_clock(dev_priv, false);
   1463 	if (!ret)
   1464 		ret = err;
   1465 
   1466 	vlv_check_no_gt_access(dev_priv);
   1467 
   1468 	if (rpm_resume) {
   1469 		intel_init_clock_gating(dev);
   1470 		i915_gem_restore_fences(dev);
   1471 	}
   1472 
   1473 	return ret;
   1474 }
   1475 
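        /*
         * Runtime PM suspend handler.  Backs off with -EAGAIN on
         * struct_mutex contention, quiesces GEM, the GuC, powersave and
         * interrupts, runs the platform suspend hook and notifies the
         * opregion of the new device state.
         */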
   1476 static int intel_runtime_suspend(struct device *device)
   1477 {
   1478 	struct pci_dev *pdev = to_pci_dev(device);
   1479 	struct drm_device *dev = pci_get_drvdata(pdev);
   1480 	struct drm_i915_private *dev_priv = dev->dev_private;
   1481 	int ret;
   1482 
   1483 	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
   1484 		return -ENODEV;
   1485 
   1486 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
   1487 		return -ENODEV;
   1488 
   1489 	DRM_DEBUG_KMS("Suspending device\n");
   1490 
   1491 	/*
   1492 	 * We could deadlock here in case another thread holding struct_mutex
   1493 	 * calls RPM suspend concurrently, since the RPM suspend will wait
   1494 	 * first for this RPM suspend to finish. In this case the concurrent
   1495 	 * RPM resume will be followed by its RPM suspend counterpart. Still
   1496 	 * for consistency return -EAGAIN, which will reschedule this suspend.
   1497 	 */
   1498 	if (!mutex_trylock(&dev->struct_mutex)) {
   1499 		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
   1500 		/*
   1501 		 * Bump the expiration timestamp, otherwise the suspend won't
   1502 		 * be rescheduled.
   1503 		 */
   1504 		pm_runtime_mark_last_busy(device);
   1505 
   1506 		return -EAGAIN;
   1507 	}
   1508 	/*
   1509 	 * We are safe here against re-faults, since the fault handler takes
   1510 	 * an RPM reference.
   1511 	 */
   1512 	i915_gem_release_all_mmaps(dev_priv);
   1513 	mutex_unlock(&dev->struct_mutex);
   1514 
   1515 	intel_guc_suspend(dev);
   1516 
   1517 	intel_suspend_gt_powersave(dev);
   1518 	intel_runtime_pm_disable_interrupts(dev_priv);
   1519 
   1520 	ret = intel_suspend_complete(dev_priv);
   1521 	if (ret) {
   1522 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
   1523 		intel_runtime_pm_enable_interrupts(dev_priv);
   1524 
   1525 		return ret;
   1526 	}
   1527 
   1528 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
   1529 	intel_uncore_forcewake_reset(dev, false);
   1530 	dev_priv->pm.suspended = true;
   1531 
   1532 	/*
   1533 	 * FIXME: We really should find a document that references the arguments
   1534 	 * used below!
   1535 	 */
   1536 	if (IS_BROADWELL(dev)) {
   1537 		/*
   1538 		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
   1539 		 * being detected, and the call we do at intel_runtime_resume()
   1540 		 * won't be able to restore them. Since PCI_D3hot matches the
   1541 		 * actual specification and appears to be working, use it.
   1542 		 */
   1543 		intel_opregion_notify_adapter(dev, PCI_D3hot);
   1544 	} else {
   1545 		/*
   1546 		 * current versions of firmware which depend on this opregion
   1547 		 * notification have repurposed the D1 definition to mean
   1548 		 * "runtime suspended" vs. what you would normally expect (D3)
   1549 		 * to distinguish it from notifications that might be sent via
   1550 		 * the suspend path.
   1551 		 */
   1552 		intel_opregion_notify_adapter(dev, PCI_D1);
   1553 	}
   1554 
   1555 	assert_forcewakes_inactive(dev_priv);
   1556 
   1557 	DRM_DEBUG_KMS("Device suspended\n");
   1558 	return 0;
   1559 }
   1560 
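        /*
         * Runtime PM resume handler, undoing intel_runtime_suspend(): run
         * the platform resume hook, restore swizzling and ring frequencies,
         * and re-enable interrupts, hotplug and GT powersave.
         */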
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_SKYLAKE(dev))
		ret = skl_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements the common functionality of the runtime and
 * system suspend sequences.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		ret = skl_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

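/*
 * Dispatch table wiring the suspend/resume handlers into the kernel PM
 * core; the same handler pairs back both the S3 and S4 (hibernation)
 * transitions, as the per-field comments below spell out.
 */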
static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

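/* GEM mmap handling: page faults go to i915, open/close to DRM helpers. */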
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

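/*
 * Standard DRM file operations; file I/O and ioctls are funnelled through
 * the DRM core, which dispatches driver-specific work via the ioctl table
 * and GEM hooks in struct drm_driver below.
 */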
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

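/*
 * PCI glue: binds the driver to the device IDs in pciidlist and, via
 * .driver.pm, hands the PM core the i915_pm_ops table defined above.
 */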
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

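/*
 * Module entry point: decide whether KMS should be enabled and register
 * the PCI driver.  When modesetting is disabled the function returns 0
 * without registering anything, so userspace never sees a load failure.
 * (Illustrative: KMS is typically forced off by booting with
 * "i915.modeset=0", or with "nomodeset" where vgacon_text_force()
 * reports the VGA console override.)
 */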
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);
}

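/*
 * Module exit: unregister the PCI driver, but only if i915_init()
 * actually registered it (it returns early with KMS disabled).
 */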
static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");