/* i915_drv.c, NetBSD revision 1.2.4.2 -- sys/external/bsd/drm2/dist/drm/i915 */
      1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
      2  */
      3 /*
      4  *
      5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  */
     29 
     30 #include <linux/device.h>
     31 #include <linux/moduleparam.h>
     32 #include <linux/time.h>
     33 #include <drm/drmP.h>
     34 #include <drm/i915_drm.h>
     35 #include "i915_drv.h"
     36 #include "i915_trace.h"
     37 #include "intel_drv.h"
     38 
     39 #include <linux/console.h>
     40 #include <linux/module.h>
     41 #include <drm/drm_crtc_helper.h>
     42 
/*
 * Module parameters.  Each triple is: the backing variable, the
 * module_param_named() hook that exposes it (name, variable, type,
 * sysfs permissions), and a MODULE_PARM_DESC() help string.  Several
 * use -1 to mean "pick a per-chip default at runtime".
 */
#ifndef __NetBSD__		/* XXX Use i915_modeset somewhere.  */
static int i915_modeset __read_mostly = -1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");
#endif

unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
		"-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
		"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "
		"(default: false)");

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		 "Specify LVDS channel mode "
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
		"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");

int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

unsigned int i915_preliminary_hw_support __read_mostly = 0;
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		"Enable preliminary hardware support. "
		"Enable Haswell and ValleyView Support. "
		"(default: false)");
    131 
/* Forward declaration; the driver struct is defined later in this file. */
static struct drm_driver driver;
extern int intel_agp_enabled;

#ifdef __NetBSD__
/* XXX Kludge to expose this to NetBSD driver attachment goop.  */
struct drm_driver *const i915_drm_driver = &driver;
#endif
    139 
/*
 * INTEL_VGA_DEVICE - build a pci_device_id entry for an Intel display
 * device: match any Intel (0x8086) device with the given PCI device
 * id whose class is "display" (masked to the base class byte), and
 * stash the intel_device_info pointer in driver_data.
 */
#define INTEL_VGA_DEVICE(id, info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = id,				\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }
    148 
/*
 * Per-chipset capability tables.  One intel_device_info per chip
 * family, describing generation number and feature flags (rings,
 * FBC, overlay, hotplug, ...).  Referenced from pciidlist below via
 * the INTEL_VGA_DEVICE driver_data field.
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};
    316 
/*
 * PCI id table: maps each supported PCI device id to its
 * intel_device_info entry above.  Terminated by an all-zero sentinel
 * (whose shape differs between the NetBSD and Linux pci_device_id
 * layouts, hence the #ifdef).
 */
static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
#ifdef __NetBSD__
	{0, 0, 0, 0, 0, 0, 0}
#else
	{0, 0, 0}
#endif
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

#ifdef __NetBSD__
/* XXX Kludge to expose this to NetBSD driver attachment goop.  */
const struct pci_device_id *const i915_device_ids = pciidlist;
const size_t i915_n_device_ids = __arraycount(pciidlist);
#endif
    417 
    418 void intel_detect_pch(struct drm_device *dev)
    419 {
    420 	struct drm_i915_private *dev_priv = dev->dev_private;
    421 	struct pci_dev *pch;
    422 
    423 	/*
    424 	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
    425 	 * make graphics device passthrough work easy for VMM, that only
    426 	 * need to expose ISA bridge to let driver know the real hardware
    427 	 * underneath. This is a requirement from virtualization team.
    428 	 */
    429 	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
    430 	if (pch) {
    431 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
    432 			unsigned short id;
    433 			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
    434 			dev_priv->pch_id = id;
    435 
    436 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
    437 				dev_priv->pch_type = PCH_IBX;
    438 				dev_priv->num_pch_pll = 2;
    439 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
    440 				WARN_ON(!IS_GEN5(dev));
    441 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
    442 				dev_priv->pch_type = PCH_CPT;
    443 				dev_priv->num_pch_pll = 2;
    444 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
    445 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
    446 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
    447 				/* PantherPoint is CPT compatible */
    448 				dev_priv->pch_type = PCH_CPT;
    449 				dev_priv->num_pch_pll = 2;
    450 				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
    451 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
    452 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
    453 				dev_priv->pch_type = PCH_LPT;
    454 				dev_priv->num_pch_pll = 0;
    455 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
    456 				WARN_ON(!IS_HASWELL(dev));
    457 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
    458 				dev_priv->pch_type = PCH_LPT;
    459 				dev_priv->num_pch_pll = 0;
    460 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
    461 				WARN_ON(!IS_HASWELL(dev));
    462 			}
    463 			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
    464 		}
    465 		pci_dev_put(pch);
    466 	}
    467 }
    468 
    469 bool i915_semaphore_is_enabled(struct drm_device *dev)
    470 {
    471 	if (INTEL_INFO(dev)->gen < 6)
    472 		return 0;
    473 
    474 	if (i915_semaphores >= 0)
    475 		return i915_semaphores;
    476 
    477 #ifdef CONFIG_INTEL_IOMMU
    478 	/* Enable semaphores on SNB when IO remapping is off */
    479 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
    480 		return false;
    481 #endif
    482 
    483 	return 1;
    484 }
    485 
/*
 * i915_drm_freeze - quiesce the device for suspend.
 * @dev: drm device
 *
 * Disables output polling, saves PCI state (Linux only; NetBSD pmf
 * does this), idles GEM and tears down modeset/IRQ state under KMS,
 * then saves register state and shuts down opregion.  Returns 0 on
 * success or the error from i915_gem_idle(), in which case the
 * device is left partially frozen and resume may fail.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

#ifndef __NetBSD__		/* pmf handles this for us.  */
	pci_save_state(dev->pdev);
#endif

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
#ifdef __NetBSD__
			dev_err(pci_dev_dev(dev->pdev),
			    "GEM idle failed, resume might fail\n");
#else
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
#endif
			return error;
		}

		/* Stop any pending deferred RPS work before the HW goes away. */
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		intel_modeset_disable(dev);

		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

#ifndef __NetBSD__		/* XXX fb */
	console_lock();
	intel_fbdev_set_suspend(dev, 1);
	console_unlock();
#endif

	return 0;
}
    532 
/*
 * i915_suspend - system suspend entry point.
 * @dev: drm device
 * @state: pm event being handled
 *
 * Validates the device, ignores PM_EVENT_PRETHAW and switcheroo'd-off
 * devices, freezes the driver via i915_drm_freeze(), and on Linux
 * additionally powers the PCI device down to D3hot for a real suspend
 * (NetBSD pmf handles PCI power itself).  Returns 0 on success,
 * -ENODEV if the driver is not initialized, or the freeze error.
 */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Prethaw is a pre-restore notification only; nothing to do. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

#ifndef __NetBSD__		/* pmf handles this for us.  */
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
#endif

	return 0;
}
    564 
/*
 * intel_console_resume - deferred fbdev un-suspend.
 * @work: the console_resume_work embedded in drm_i915_private
 *
 * Scheduled from __i915_drm_thaw() when the console lock could not be
 * taken on the fast resume path; un-suspends the fbdev under the
 * console lock.  Compiled out on NetBSD (no fbdev console here).
 */
void intel_console_resume(struct work_struct *work)
{
#ifndef __NetBSD__		/* XXX fb */
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
#endif
}
    578 
/*
 * __i915_drm_thaw - common device re-initialization after freeze.
 * @dev: drm device
 *
 * Restores saved register state and opregion, and under KMS brings
 * the hardware back up: PCH reference clocks, GEM HW init, modeset
 * HW state and IRQs.  Returns the i915_gem_init_hw() error (0
 * otherwise); note the KMS bring-up continues even if GEM init
 * failed, and the error is only reported to the caller at the end.
 */
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

#ifndef __NetBSD__		/* XXX fb */
	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		/* Defer the fbdev un-suspend to intel_console_resume(). */
		schedule_work(&dev_priv->console_resume_work);
	}
#endif

	return error;
}
    622 
    623 #ifndef __NetBSD__		/* XXX freeze/thaw */
    624 static int i915_drm_thaw(struct drm_device *dev)
    625 {
    626 	int error = 0;
    627 
    628 	intel_gt_reset(dev);
    629 
    630 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
    631 		mutex_lock(&dev->struct_mutex);
    632 		i915_gem_restore_gtt_mappings(dev);
    633 		mutex_unlock(&dev->struct_mutex);
    634 	}
    635 
    636 	__i915_drm_thaw(dev);
    637 
    638 	return error;
    639 }
    640 #endif
    641 
/*
 * i915_resume - system resume entry point.
 * @dev: drm device
 *
 * Skips switcheroo'd-off devices, re-enables the PCI device (Linux
 * only), sets bus mastering, resets the GT, restores GTT mappings for
 * old BIOSes without an opregion, then runs the common thaw path and
 * re-enables output polling.  Returns 0 on success, -EIO if the PCI
 * device cannot be enabled, or the __i915_drm_thaw() error.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

#ifndef __NetBSD__		/* pmf handles this for us.  */
	if (pci_enable_device(dev->pdev))
		return -EIO;
#endif

	pci_set_master(dev->pdev);

	intel_gt_reset(dev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    !dev_priv->opregion.header) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	ret = __i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
    677 
/*
 * i8xx_do_reset - GPU reset for gen2 (i830/845/i865 class) hardware.
 * @dev: drm device
 *
 * Asserts the gen2 graphics reset bit in D_STATE, additionally pulses
 * the DEBUG_RESET register on i830/845G, waits, then de-asserts the
 * reset bit.  Register writes are flushed with POSTING_READ at each
 * step.  Returns 0 on success, -ENODEV on i85x (no reset supported).
 */
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		/* Give the hardware time to act before clearing the bits. */
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
    707 
    708 static int i965_reset_complete(struct drm_device *dev)
    709 {
    710 	u8 gdrst;
    711 	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
    712 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
    713 }
    714 
/*
 * i965_do_reset - GPU reset for gen4 hardware via the GDRST config byte.
 * @dev: drm device
 *
 * Resets the render domain, waits (up to 500ms) for completion, then
 * resets the media domain the same way.  Returns 0 on success or the
 * wait_for() timeout error.
 */
static int i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_RENDER |
			      GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_MEDIA |
			      GRDOM_RESET_ENABLE);

	return wait_for(i965_reset_complete(dev), 500);
}
    741 
/*
 * ironlake_do_reset - GPU reset for gen5 via the MCHBAR-mirrored GDSR.
 * @dev: drm device
 *
 * Resets the render domain and waits (up to 500ms) for bit 0 of GDSR
 * to signal completion, then does the same for the media domain.
 * Returns 0 on success or the wait_for() timeout error.
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
    761 
/*
 * gen6_do_reset - full GPU reset for gen6/gen7 via GEN6_GDRST.
 * @dev: drm device
 *
 * Performs a full-domain reset and waits (up to 500ms) for the
 * hardware to acknowledge, then restores the forcewake state and
 * re-reads the GT FIFO free-entry count, all while holding gt_lock.
 * Returns 0 on success or the wait_for() timeout error.
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;
	unsigned long irqflags;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->gt.force_wake_get(dev_priv);
	else
		dev_priv->gt.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	return ret;
}
    796 
/*
 * intel_gpu_reset - dispatch a GPU reset by hardware generation.
 * @dev: drm device
 *
 * Calls the generation-specific reset routine (gen2, gen4, gen5,
 * gen6/7); other generations return -ENODEV.  Also clears the
 * simulated-hang stop_rings state, downgrading -ENODEV to success for
 * simulated hangs.  Returns 0 on success or a negative error code.
 */
int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = -ENODEV;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev);
		break;
	case 5:
		ret = ironlake_do_reset(dev);
		break;
	case 4:
		ret = i965_do_reset(dev);
		break;
	case 2:
		ret = i8xx_do_reset(dev);
		break;
	/* gen3 and unknown gens fall through with ret == -ENODEV */
	}

	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	return ret;
}
    831 
    832 /**
    833  * i915_reset - reset chip after a hang
    834  * @dev: drm device to reset
    835  *
    836  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
    837  * reset or otherwise an error code.
    838  *
    839  * Procedure is fairly simple:
    840  *   - reset the chip using the reset reg
    841  *   - re-init context state
    842  *   - re-init hardware status page
    843  *   - re-init ring buffer
    844  *   - re-init interrupt state
    845  *   - re-init display
    846  */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	/* Module parameter escape hatch: reporting success without doing
	 * anything when resets are disabled. */
	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Refuse to reset (leaving ret = -ENODEV, i.e. wedged) if the
	 * previous reset was under 5 seconds ago -- the GPU is hanging
	 * faster than resets can help. */
	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		/* Re-init every ring's hardware state after the reset. */
		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		/* struct_mutex must be dropped before the irq reinstall:
		 * drm_irq_install can sleep and may take other locks. */
		mutex_unlock(&dev->struct_mutex);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
    917 
    918 #ifdef __NetBSD__
    919 
/* NetBSD UVM pager ops for GEM objects: reference/detach go through the
 * generic drm_gem pager glue, faults are serviced by i915_gem_fault. */
static const struct uvm_pagerops i915_gem_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = i915_gem_fault,
};
    925 
    926 #else
    927 
    928 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    929 {
    930 	struct intel_device_info *intel_info =
    931 		(struct intel_device_info *) ent->driver_data;
    932 
    933 	if (intel_info->is_valleyview)
    934 		if(!i915_preliminary_hw_support) {
    935 			DRM_ERROR("Preliminary hardware support disabled\n");
    936 			return -ENODEV;
    937 		}
    938 
    939 	/* Only bind to function 0 of the device. Early generations
    940 	 * used function 1 as a placeholder for multi-head. This causes
    941 	 * us confusion instead, especially on the systems where both
    942 	 * functions have the same PCI-ID!
    943 	 */
    944 	if (PCI_FUNC(pdev->devfn))
    945 		return -ENODEV;
    946 
    947 	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
    948 	 * implementation for gen3 (and only gen3) that used legacy drm maps
    949 	 * (gasp!) to share buffers between X and the client. Hence we need to
    950 	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
    951 	if (intel_info->gen != 3) {
    952 		driver.driver_features &=
    953 			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
    954 	} else if (!intel_agp_enabled) {
    955 		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
    956 		return -ENODEV;
    957 	}
    958 
    959 	return drm_get_pci_dev(pdev, ent, &driver);
    960 }
    961 
/* PCI remove callback: release the drm_device registered at probe time. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
    969 
    970 static int i915_pm_suspend(struct device *dev)
    971 {
    972 	struct pci_dev *pdev = to_pci_dev(dev);
    973 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
    974 	int error;
    975 
    976 	if (!drm_dev || !drm_dev->dev_private) {
    977 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
    978 		return -ENODEV;
    979 	}
    980 
    981 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    982 		return 0;
    983 
    984 	error = i915_drm_freeze(drm_dev);
    985 	if (error)
    986 		return error;
    987 
    988 	pci_disable_device(pdev);
    989 	pci_set_power_state(pdev, PCI_D3hot);
    990 
    991 	return 0;
    992 }
    993 
/* dev_pm_ops .resume/.restore hook: delegate to i915_resume(). */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
   1001 
   1002 static int i915_pm_freeze(struct device *dev)
   1003 {
   1004 	struct pci_dev *pdev = to_pci_dev(dev);
   1005 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
   1006 
   1007 	if (!drm_dev || !drm_dev->dev_private) {
   1008 		dev_err(dev, "DRM not initialized, aborting suspend.\n");
   1009 		return -ENODEV;
   1010 	}
   1011 
   1012 	return i915_drm_freeze(drm_dev);
   1013 }
   1014 
   1015 static int i915_pm_thaw(struct device *dev)
   1016 {
   1017 	struct pci_dev *pdev = to_pci_dev(dev);
   1018 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
   1019 
   1020 	return i915_drm_thaw(drm_dev);
   1021 }
   1022 
   1023 static int i915_pm_poweroff(struct device *dev)
   1024 {
   1025 	struct pci_dev *pdev = to_pci_dev(dev);
   1026 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
   1027 
   1028 	return i915_drm_freeze(drm_dev);
   1029 }
   1030 
/* Power-management callbacks for the KMS (DRIVER_MODESET) path; the
 * legacy path uses driver.suspend/.resume instead.  .restore reuses
 * the resume routine since coming back from hibernation and from
 * suspend need the same re-initialization here. */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,
};
   1039 
/* Linux VM ops for mmap'ed GEM objects: open/close refcount the object
 * via the generic drm_gem helpers, faults go to i915_gem_fault. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
   1045 
/* File operations for /dev/dri/cardN: everything is routed through the
 * drm core helpers; only the 32-bit compat ioctl path is i915-specific. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
   1060 
   1061 #endif	/* defined(__NetBSD__) */
   1062 
/* Main drm_driver descriptor.  driver_features is further adjusted at
 * runtime: i915_pci_probe() strips the AGP flags for non-gen3, and
 * i915_init() may add DRIVER_MODESET. */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
#ifdef __NetBSD__
	/* NetBSD maps GEM objects through UVM pager ops instead of vm ops. */
	.gem_uvm_ops = &i915_gem_uvm_ops,
#else
	.gem_vm_ops = &i915_gem_vm_ops,
#endif

#ifndef __NetBSD__		/* XXX drm prime */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
#endif

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = i915_gem_dumb_destroy,
	.ioctls = i915_ioctls,
#ifdef __NetBSD__
	/* NOTE(review): NetBSD has no struct file_operations; file access
	 * presumably goes through the drm NetBSD glue -- confirm. */
	.fops = NULL,
#else
	.fops = &i915_driver_fops,
#endif
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
   1119 
   1120 #ifndef __NetBSD__
/* PCI glue binding the drm driver above to the Intel GPU PCI IDs in
 * pciidlist; power management goes through i915_pm_ops. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
   1128 #endif
   1129 
   1130 #ifndef __NetBSD__
/*
 * i915_init - module entry point (Linux only).
 *
 * Resolves the final DRIVER_MODESET setting from the i915_modeset module
 * parameter (-1 = auto, 0 = off, 1 = on per the comparisons below) and
 * registers the PCI driver with the drm core.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module pararmeter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915_modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	/* UMS has no reliable vblank timestamps; drop the hook. */
	if (!(driver.driver_features & DRIVER_MODESET))
		driver.get_vblank_timestamp = NULL;

	return drm_pci_init(&driver, &i915_pci_driver);
}
   1161 
/* Module exit: unregister the PCI driver from the drm core. */
static void __exit i915_exit(void)
{
	drm_pci_exit(&driver, &i915_pci_driver);
}
   1166 
   1167 module_init(i915_init);
   1168 module_exit(i915_exit);
   1169 #endif
   1170 
   1171 MODULE_AUTHOR(DRIVER_AUTHOR);
   1172 MODULE_DESCRIPTION(DRIVER_DESC);
   1173 MODULE_LICENSE("GPL and additional rights");
   1174 
/* We give fast paths for the really cool registers.
 * True when an access to @reg must be bracketed by forcewake: only on
 * hardware that has forcewake at all, only for registers below 0x40000
 * (the GT range), and never for the FORCEWAKE register itself. */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
   1180 
/*
 * IS_DISPLAYREG - is @reg a Valleyview display register needing the
 * 0x180000 offset applied by the I/O accessors below?
 *
 * Structured as an exclusion list: every known non-display register or
 * range returns false, anything left over is treated as display.
 */
static bool IS_DISPLAYREG(u32 reg)
{
	/*
	 * This should make it easier to transition modules over to the
	 * new register block scheme, since we can do it incrementally.
	 */
	if (reg >= VLV_DISPLAY_BASE)
		return false;

	/* Ring registers (render/bsd/blitter) are not display registers. */
	if (reg >= RENDER_RING_BASE &&
	    reg < RENDER_RING_BASE + 0xff)
		return false;
	if (reg >= GEN6_BSD_RING_BASE &&
	    reg < GEN6_BSD_RING_BASE + 0xff)
		return false;
	if (reg >= BLT_RING_BASE &&
	    reg < BLT_RING_BASE + 0xff)
		return false;

	if (reg == PGTBL_ER)
		return false;

	/* Error/interrupt status block. */
	if (reg >= IPEIR_I965 &&
	    reg < HWSTAM)
		return false;

	if (reg == MI_MODE)
		return false;

	if (reg == GFX_MODE_GEN7)
		return false;

	if (reg == RENDER_HWS_PGA_GEN7 ||
	    reg == BSD_HWS_PGA_GEN7 ||
	    reg == BLT_HWS_PGA_GEN7)
		return false;

	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
	    reg == GEN6_BSD_RNCID)
		return false;

	if (reg == GEN6_BLITTER_ECOSKPD)
		return false;

	/* NOTE(review): raw ranges below are taken as-is from the
	 * original exclusion table; see i915_reg.h for their meaning. */
	if (reg >= 0x4000c &&
	    reg <= 0x4002c)
		return false;

	if (reg >= 0x4f000 &&
	    reg <= 0x4f08f)
		return false;

	if (reg >= 0x4f100 &&
	    reg <= 0x4f11f)
		return false;

	if (reg >= VLV_MASTER_IER &&
	    reg <= GEN6_PMIER)
		return false;

	/* 16 fence registers, 8 bytes each. */
	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
		return false;

	if (reg >= VLV_IIR_RW &&
	    reg <= VLV_ISR)
		return false;

	if (reg == FORCEWAKE_VLV ||
	    reg == FORCEWAKE_ACK_VLV)
		return false;

	if (reg == GEN6_GDRST)
		return false;

	/* Assorted workaround/chicken registers. */
	switch (reg) {
	case _3D_CHICKEN3:
	case IVB_CHICKEN3:
	case GEN7_COMMON_SLICE_CHICKEN1:
	case GEN7_L3CNTLREG1:
	case GEN7_L3_CHICKEN_MODE_REGISTER:
	case GEN7_ROW_CHICKEN2:
	case GEN7_L3SQCREG4:
	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
	case GEN7_HALF_SLICE_CHICKEN1:
	case GEN6_MBCTL:
	case GEN6_UCGCTL2:
		return false;
	default:
		break;
	}

	return true;
}
   1275 
/* Ironlake-only helper used by the register accessors below. */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
	 * harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}
   1284 
   1285 #ifdef __NetBSD__
/*
 * __i915_read - generate i915_read{8,16,32,64}() (NetBSD flavour, using
 * DRM_READ* on regs_map instead of Linux read*() on an ioremap).
 *
 * Each generated reader: wakes gen5 from rc6 via a dummy write, takes a
 * temporary forcewake reference under gt_lock when the register needs it,
 * applies the VLV display-register 0x180000 offset when required, and
 * traces the access.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		unsigned long irqflags; \
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = DRM_READ##x(dev_priv->regs_map, reg); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		val = DRM_READ##x(dev_priv->regs_map, reg + 0x180000);	\
	} else { \
		val = DRM_READ##x(dev_priv->regs_map, reg); \
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}
   1308 #else
/*
 * __i915_read - generate i915_read{8,16,32,64}() (Linux flavour, plain
 * read##y on the ioremap'ed dev_priv->regs).
 *
 * Same sequence as the NetBSD variant: gen5 rc6 wakeup, forcewake
 * bracketing under gt_lock for GT registers, VLV display offset, trace.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		unsigned long irqflags; \
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = read##y(dev_priv->regs + reg); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		val = read##y(dev_priv->regs + reg + 0x180000);		\
	} else { \
		val = read##y(dev_priv->regs + reg); \
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}
   1331 #endif
   1332 
/* Instantiate i915_read8/16/32/64 for byte/word/long/quad accesses. */
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
   1338 
   1339 #ifdef __NetBSD__
/*
 * __i915_write - generate i915_write{8,16,32,64}() (NetBSD flavour).
 *
 * Each generated writer: traces the access, waits for GT FIFO space when
 * the register needs forcewake, wakes gen5 from rc6, checks for (and
 * clears) stale Haswell unclaimed-mmio errors before and after the
 * write, and applies the VLV display 0x180000 offset when required.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
	} \
	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		DRM_WRITE##x(dev_priv->regs_map, reg + 0x180000, val);	\
	} else {							\
		DRM_WRITE##x(dev_priv->regs_map, reg, val);		\
	}								\
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unclaimed write to %x\n", reg); \
		DRM_WRITE32(dev_priv->regs_map, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
	} \
}
   1366 #else
/*
 * __i915_write - generate i915_write{8,16,32,64}() (Linux flavour, plain
 * write##y on the ioremap'ed dev_priv->regs).
 *
 * Same sequence as the NetBSD variant: trace, FIFO wait for forcewake
 * registers, gen5 rc6 wakeup, Haswell unclaimed-mmio check before and
 * after, VLV display 0x180000 offset.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
	} \
	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		write##y(val, dev_priv->regs + reg + 0x180000);		\
	} else {							\
		write##y(val, dev_priv->regs + reg);			\
	}								\
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unclaimed write to %x\n", reg); \
		writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);	\
	} \
}
   1393 #endif
   1394 
/* Instantiate i915_write8/16/32/64 for byte/word/long/quad accesses. */
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
   1400 
/* Registers userspace may read via I915_REG_READ (i915_reg_read_ioctl). */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens: bit N = gen N, e.g. 0x10 for gen4, 0x30 for gen4+5 */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
   1408 
   1409 int i915_reg_read_ioctl(struct drm_device *dev,
   1410 			void *data, struct drm_file *file)
   1411 {
   1412 	struct drm_i915_private *dev_priv = dev->dev_private;
   1413 	struct drm_i915_reg_read *reg = data;
   1414 	struct register_whitelist const *entry = whitelist;
   1415 	int i;
   1416 
   1417 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
   1418 		if (entry->offset == reg->offset &&
   1419 		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
   1420 			break;
   1421 	}
   1422 
   1423 	if (i == ARRAY_SIZE(whitelist))
   1424 		return -EINVAL;
   1425 
   1426 	switch (entry->size) {
   1427 	case 8:
   1428 		reg->val = I915_READ64(reg->offset);
   1429 		break;
   1430 	case 4:
   1431 		reg->val = I915_READ(reg->offset);
   1432 		break;
   1433 	case 2:
   1434 		reg->val = I915_READ16(reg->offset);
   1435 		break;
   1436 	case 1:
   1437 		reg->val = I915_READ8(reg->offset);
   1438 		break;
   1439 	default:
   1440 		WARN_ON(1);
   1441 		return -EINVAL;
   1442 	}
   1443 
   1444 	return 0;
   1445 }
   1446