i915_irq.c revision 1.10.20.2
      1 /*	$NetBSD: i915_irq.c,v 1.10.20.2 2020/04/08 14:08:23 martin Exp $	*/
      2 
      3 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
      4  */
      5 /*
      6  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      7  * All Rights Reserved.
      8  *
      9  * Permission is hereby granted, free of charge, to any person obtaining a
     10  * copy of this software and associated documentation files (the
     11  * "Software"), to deal in the Software without restriction, including
     12  * without limitation the rights to use, copy, modify, merge, publish,
     13  * distribute, sub license, and/or sell copies of the Software, and to
     14  * permit persons to whom the Software is furnished to do so, subject to
     15  * the following conditions:
     16  *
     17  * The above copyright notice and this permission notice (including the
     18  * next paragraph) shall be included in all copies or substantial portions
     19  * of the Software.
     20  *
     21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     22  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     24  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     25  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     26  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     27  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     28  *
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.10.20.2 2020/04/08 14:08:23 martin Exp $");
     33 
     34 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     35 
     36 #include <linux/sysrq.h>
     37 #include <linux/slab.h>
     38 #ifdef CONFIG_DEBUG_FS
     39 #include <linux/circ_buf.h>
     40 #endif
     41 #include <drm/drmP.h>
     42 #include <drm/i915_drm.h>
     43 #include "i915_drv.h"
     44 #include "i915_trace.h"
     45 #include "intel_drv.h"
     46 
     47 /**
     48  * DOC: interrupt handling
     49  *
     50  * These functions provide the basic support for enabling and disabling the
     51  * interrupt handling support. There's a lot more functionality in i915_irq.c
     52  * and related files, but that will be described in separate chapters.
     53  */
     54 
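        /*
         * Background on the register scheme used throughout this file
         * (roughly): each interrupt domain has an ISR (live status), an IMR
         * (mask), an IIR (latched identity) and an IER (enable) register.
         * Conditions that are not masked in IMR latch into IIR, and latched
         * IIR bits that are enabled in IER raise the actual CPU interrupt,
         * which is why the reset and init helpers below always program IMR
         * and IER as a pair.
         */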
     55 static const u32 hpd_ilk[HPD_NUM_PINS] = {
     56 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
     57 };
     58 
     59 static const u32 hpd_ivb[HPD_NUM_PINS] = {
     60 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
     61 };
     62 
     63 static const u32 hpd_bdw[HPD_NUM_PINS] = {
     64 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
     65 };
     66 
     67 static const u32 hpd_ibx[HPD_NUM_PINS] = {
     68 	[HPD_CRT] = SDE_CRT_HOTPLUG,
     69 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
     70 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
     71 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
     72 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
     73 };
     74 
     75 static const u32 hpd_cpt[HPD_NUM_PINS] = {
     76 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
     77 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
     78 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
     79 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
     80 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
     81 };
     82 
     83 static const u32 hpd_spt[HPD_NUM_PINS] = {
     84 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
     85 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
     86 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
     87 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
     88 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
     89 };
     90 
     91 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
     92 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
     93 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
     94 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
     95 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
     96 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
     97 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
     98 };
     99 
    100 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
    101 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
    102 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
    103 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
    104 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
    105 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
    106 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
    107 };
    108 
    109 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
    110 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
    111 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
    112 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
    113 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
    114 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
    115 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
    116 };
    117 
    118 /* BXT hpd list */
    119 static const u32 hpd_bxt[HPD_NUM_PINS] = {
    120 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
    121 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
    122 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
    123 };
    124 
    125 /* IIR can theoretically queue up two events. Be paranoid. */
    126 #define GEN8_IRQ_RESET_NDX(type, which) do { \
    127 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
    128 	POSTING_READ(GEN8_##type##_IMR(which)); \
    129 	I915_WRITE(GEN8_##type##_IER(which), 0); \
    130 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
    131 	POSTING_READ(GEN8_##type##_IIR(which)); \
    132 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
    133 	POSTING_READ(GEN8_##type##_IIR(which)); \
    134 } while (0)
    135 
    136 #define GEN5_IRQ_RESET(type) do { \
    137 	I915_WRITE(type##IMR, 0xffffffff); \
    138 	POSTING_READ(type##IMR); \
    139 	I915_WRITE(type##IER, 0); \
    140 	I915_WRITE(type##IIR, 0xffffffff); \
    141 	POSTING_READ(type##IIR); \
    142 	I915_WRITE(type##IIR, 0xffffffff); \
    143 	POSTING_READ(type##IIR); \
    144 } while (0)
    145 
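        /*
         * As noted above, IIR can latch a second event behind the one it is
         * currently showing, and its bits are write-1-to-clear.  The reset
         * macros therefore clear IIR twice, with a posting read after each
         * write so the clears actually reach the hardware before the next
         * access.
         */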
    146 /*
    147  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
    148  */
    149 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
    150 {
    151 	u32 val = I915_READ(reg);
    152 
    153 	if (val == 0)
    154 		return;
    155 
    156 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
    157 	     reg, val);
    158 	I915_WRITE(reg, 0xffffffff);
    159 	POSTING_READ(reg);
    160 	I915_WRITE(reg, 0xffffffff);
    161 	POSTING_READ(reg);
    162 }
    163 
    164 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
    165 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
    166 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
    167 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
    168 	POSTING_READ(GEN8_##type##_IMR(which)); \
    169 } while (0)
    170 
    171 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
    172 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
    173 	I915_WRITE(type##IER, (ier_val)); \
    174 	I915_WRITE(type##IMR, (imr_val)); \
    175 	POSTING_READ(type##IMR); \
    176 } while (0)
    177 
    178 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
    179 
    180 /* For display hotplug interrupt */
    181 static inline void
    182 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
    183 				     uint32_t mask,
    184 				     uint32_t bits)
    185 {
    186 	uint32_t val;
    187 
    188 	assert_spin_locked(&dev_priv->irq_lock);
    189 	WARN_ON(bits & ~mask);
    190 
    191 	val = I915_READ(PORT_HOTPLUG_EN);
    192 	val &= ~mask;
    193 	val |= bits;
    194 	I915_WRITE(PORT_HOTPLUG_EN, val);
    195 }
    196 
    197 /**
    198  * i915_hotplug_interrupt_update - update hotplug interrupt enable
    199  * @dev_priv: driver private
    200  * @mask: bits to update
    201  * @bits: bits to enable
    202  * NOTE: the HPD enable bits are modified both inside and outside
    203  * of an interrupt context. To prevent read-modify-write cycles from
    204  * interfering, these bits are protected by a spinlock. Since this
    205  * function is usually not called from a context where the lock is
    206  * held already, this function acquires the lock itself. A non-locking
    207  * version is also available.
    208  */
    209 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
    210 				   uint32_t mask,
    211 				   uint32_t bits)
    212 {
    213 	spin_lock_irq(&dev_priv->irq_lock);
    214 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
    215 	spin_unlock_irq(&dev_priv->irq_lock);
    216 }
    217 
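        /*
         * A hypothetical usage sketch (names taken from the tables above):
         * to enable hotplug detection for port B only, a caller could do
         *
         *	i915_hotplug_interrupt_update(dev_priv,
         *				      PORTB_HOTPLUG_INT_EN,
         *				      PORTB_HOTPLUG_INT_EN);
         *
         * and later pass 0 as the last argument to disable it again without
         * disturbing the enable bits of the other ports.
         */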
    218 /**
    219  * ilk_update_display_irq - update DEIMR
    220  * @dev_priv: driver private
    221  * @interrupt_mask: mask of interrupt bits to update
    222  * @enabled_irq_mask: mask of interrupt bits to enable
    223  */
    224 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
    225 				   uint32_t interrupt_mask,
    226 				   uint32_t enabled_irq_mask)
    227 {
    228 	uint32_t new_val;
    229 
    230 	assert_spin_locked(&dev_priv->irq_lock);
    231 
    232 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
    233 
    234 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
    235 		return;
    236 
    237 	new_val = dev_priv->irq_mask;
    238 	new_val &= ~interrupt_mask;
    239 	new_val |= (~enabled_irq_mask & interrupt_mask);
    240 
    241 	if (new_val != dev_priv->irq_mask) {
    242 		dev_priv->irq_mask = new_val;
    243 		I915_WRITE(DEIMR, dev_priv->irq_mask);
    244 		POSTING_READ(DEIMR);
    245 	}
    246 }
    247 
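        /*
         * Note on the computation above: a set bit in DEIMR (and in the
         * cached dev_priv->irq_mask) means "masked off".  The update first
         * clears every bit covered by interrupt_mask and then re-sets the
         * ones that are not in enabled_irq_mask, so only the requested bits
         * change state and the register is touched only when the value
         * actually differs.
         */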
    248 void
    249 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
    250 {
    251 	ilk_update_display_irq(dev_priv, mask, mask);
    252 }
    253 
    254 void
    255 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
    256 {
    257 	ilk_update_display_irq(dev_priv, mask, 0);
    258 }
    259 
    260 /**
    261  * ilk_update_gt_irq - update GTIMR
    262  * @dev_priv: driver private
    263  * @interrupt_mask: mask of interrupt bits to update
    264  * @enabled_irq_mask: mask of interrupt bits to enable
    265  */
    266 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
    267 			      uint32_t interrupt_mask,
    268 			      uint32_t enabled_irq_mask)
    269 {
    270 	assert_spin_locked(&dev_priv->irq_lock);
    271 
    272 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
    273 
    274 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
    275 		return;
    276 
    277 	dev_priv->gt_irq_mask &= ~interrupt_mask;
    278 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
    279 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    280 	POSTING_READ(GTIMR);
    281 }
    282 
    283 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
    284 {
    285 	ilk_update_gt_irq(dev_priv, mask, mask);
    286 }
    287 
    288 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
    289 {
    290 	ilk_update_gt_irq(dev_priv, mask, 0);
    291 }
    292 
    293 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
    294 {
    295 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
    296 }
    297 
    298 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
    299 {
    300 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
    301 }
    302 
    303 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
    304 {
    305 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
    306 }
    307 
    308 /**
    309  * snb_update_pm_irq - update GEN6_PMIMR
    310  * @dev_priv: driver private
    311  * @interrupt_mask: mask of interrupt bits to update
    312  * @enabled_irq_mask: mask of interrupt bits to enable
    313  */
    314 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
    315 			      uint32_t interrupt_mask,
    316 			      uint32_t enabled_irq_mask)
    317 {
    318 	uint32_t new_val;
    319 
    320 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
    321 
    322 	assert_spin_locked(&dev_priv->irq_lock);
    323 
    324 	new_val = dev_priv->pm_irq_mask;
    325 	new_val &= ~interrupt_mask;
    326 	new_val |= (~enabled_irq_mask & interrupt_mask);
    327 
    328 	if (new_val != dev_priv->pm_irq_mask) {
    329 		dev_priv->pm_irq_mask = new_val;
    330 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
    331 		POSTING_READ(gen6_pm_imr(dev_priv));
    332 	}
    333 }
    334 
    335 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
    336 {
    337 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
    338 		return;
    339 
    340 	snb_update_pm_irq(dev_priv, mask, mask);
    341 }
    342 
    343 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
    344 				  uint32_t mask)
    345 {
    346 	snb_update_pm_irq(dev_priv, mask, 0);
    347 }
    348 
    349 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
    350 {
    351 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
    352 		return;
    353 
    354 	__gen6_disable_pm_irq(dev_priv, mask);
    355 }
    356 
    357 void gen6_reset_rps_interrupts(struct drm_device *dev)
    358 {
    359 	struct drm_i915_private *dev_priv = dev->dev_private;
    360 	uint32_t reg = gen6_pm_iir(dev_priv);
    361 
    362 	spin_lock_irq(&dev_priv->irq_lock);
    363 	I915_WRITE(reg, dev_priv->pm_rps_events);
    364 	I915_WRITE(reg, dev_priv->pm_rps_events);
    365 	POSTING_READ(reg);
    366 	dev_priv->rps.pm_iir = 0;
    367 	spin_unlock_irq(&dev_priv->irq_lock);
    368 }
    369 
    370 void gen6_enable_rps_interrupts(struct drm_device *dev)
    371 {
    372 	struct drm_i915_private *dev_priv = dev->dev_private;
    373 
    374 	spin_lock_irq(&dev_priv->irq_lock);
    375 
    376 	WARN_ON(dev_priv->rps.pm_iir);
    377 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
    378 	dev_priv->rps.interrupts_enabled = true;
    379 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
    380 				dev_priv->pm_rps_events);
    381 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
    382 
    383 	spin_unlock_irq(&dev_priv->irq_lock);
    384 }
    385 
    386 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
    387 {
    388 	/*
    389 	 * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
    390 	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
    391 	 *
    392 	 * TODO: verify if this can be reproduced on VLV,CHV.
    393 	 */
    394 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
    395 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
    396 
    397 	if (INTEL_INFO(dev_priv)->gen >= 8)
    398 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
    399 
    400 	return mask;
    401 }
    402 
    403 void gen6_disable_rps_interrupts(struct drm_device *dev)
    404 {
    405 	struct drm_i915_private *dev_priv = dev->dev_private;
    406 
    407 	spin_lock_irq(&dev_priv->irq_lock);
    408 	dev_priv->rps.interrupts_enabled = false;
    409 	spin_unlock_irq(&dev_priv->irq_lock);
    410 
    411 	cancel_work_sync(&dev_priv->rps.work);
    412 
    413 	spin_lock_irq(&dev_priv->irq_lock);
    414 
    415 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
    416 
    417 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
    418 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
    419 				~dev_priv->pm_rps_events);
    420 
    421 	spin_unlock_irq(&dev_priv->irq_lock);
    422 
    423 	synchronize_irq(dev->irq);
    424 }
    425 
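        /*
         * The ordering in gen6_disable_rps_interrupts() above is deliberate:
         * interrupts_enabled is cleared under the lock first so a concurrent
         * gen6_pm_rps_work() bails out early, the work item is then cancelled,
         * the RPS bits are masked and removed from IER, and finally
         * synchronize_irq() waits out any handler that might still be running.
         */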
    426 /**
    427  * bdw_update_port_irq - update DE port interrupt
    428  * @dev_priv: driver private
    429  * @interrupt_mask: mask of interrupt bits to update
    430  * @enabled_irq_mask: mask of interrupt bits to enable
    431  */
    432 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
    433 				uint32_t interrupt_mask,
    434 				uint32_t enabled_irq_mask)
    435 {
    436 	uint32_t new_val;
    437 	uint32_t old_val;
    438 
    439 	assert_spin_locked(&dev_priv->irq_lock);
    440 
    441 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
    442 
    443 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
    444 		return;
    445 
    446 	old_val = I915_READ(GEN8_DE_PORT_IMR);
    447 
    448 	new_val = old_val;
    449 	new_val &= ~interrupt_mask;
    450 	new_val |= (~enabled_irq_mask & interrupt_mask);
    451 
    452 	if (new_val != old_val) {
    453 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
    454 		POSTING_READ(GEN8_DE_PORT_IMR);
    455 	}
    456 }
    457 
    458 /**
    459  * ibx_display_interrupt_update - update SDEIMR
    460  * @dev_priv: driver private
    461  * @interrupt_mask: mask of interrupt bits to update
    462  * @enabled_irq_mask: mask of interrupt bits to enable
    463  */
    464 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
    465 				  uint32_t interrupt_mask,
    466 				  uint32_t enabled_irq_mask)
    467 {
    468 	uint32_t sdeimr = I915_READ(SDEIMR);
    469 	sdeimr &= ~interrupt_mask;
    470 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
    471 
    472 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
    473 
    474 	assert_spin_locked(&dev_priv->irq_lock);
    475 
    476 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
    477 		return;
    478 
    479 	I915_WRITE(SDEIMR, sdeimr);
    480 	POSTING_READ(SDEIMR);
    481 }
    482 
    483 static void
    484 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
    485 		       u32 enable_mask, u32 status_mask)
    486 {
    487 	u32 reg = PIPESTAT(pipe);
    488 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
    489 
    490 	assert_spin_locked(&dev_priv->irq_lock);
    491 	WARN_ON(!intel_irqs_enabled(dev_priv));
    492 
    493 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
    494 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
    495 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
    496 		      pipe_name(pipe), enable_mask, status_mask))
    497 		return;
    498 
    499 	if ((pipestat & enable_mask) == enable_mask)
    500 		return;
    501 
    502 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
    503 
    504 	/* Enable the interrupt, clear any pending status */
    505 	pipestat |= enable_mask | status_mask;
    506 	I915_WRITE(reg, pipestat);
    507 	POSTING_READ(reg);
    508 }
    509 
    510 static void
    511 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
    512 		        u32 enable_mask, u32 status_mask)
    513 {
    514 	u32 reg = PIPESTAT(pipe);
    515 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
    516 
    517 	assert_spin_locked(&dev_priv->irq_lock);
    518 	WARN_ON(!intel_irqs_enabled(dev_priv));
    519 
    520 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
    521 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
    522 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
    523 		      pipe_name(pipe), enable_mask, status_mask))
    524 		return;
    525 
    526 	if ((pipestat & enable_mask) == 0)
    527 		return;
    528 
    529 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
    530 
    531 	pipestat &= ~enable_mask;
    532 	I915_WRITE(reg, pipestat);
    533 	POSTING_READ(reg);
    534 }
    535 
    536 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
    537 {
    538 	u32 enable_mask = status_mask << 16;
    539 
    540 	/*
    541 	 * On pipe A we don't support the PSR interrupt yet,
    542 	 * on pipe B and C the same bit MBZ (must be zero).
    543 	 */
    544 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
    545 		return 0;
    546 	/*
    547 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
    548 	 * A the same bit is for perf counters which we don't use either.
    549 	 */
    550 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
    551 		return 0;
    552 
    553 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
    554 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
    555 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
    556 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
    557 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
    558 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
    559 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
    560 
    561 	return enable_mask;
    562 }
    563 
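        /*
         * PIPESTAT keeps the interrupt enable bits in the high half of the
         * register and the matching status bits in the low half, which is why
         * the default enable mask is simply status_mask << 16.  The VLV
         * helper above only has to special-case the few bits (FIFO underrun,
         * sprite flip done) that do not follow that pattern.
         */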
    564 void
    565 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
    566 		     u32 status_mask)
    567 {
    568 	u32 enable_mask;
    569 
    570 	if (IS_VALLEYVIEW(dev_priv->dev))
    571 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
    572 							   status_mask);
    573 	else
    574 		enable_mask = status_mask << 16;
    575 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
    576 }
    577 
    578 void
    579 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
    580 		      u32 status_mask)
    581 {
    582 	u32 enable_mask;
    583 
    584 	if (IS_VALLEYVIEW(dev_priv->dev))
    585 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
    586 							   status_mask);
    587 	else
    588 		enable_mask = status_mask << 16;
    589 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
    590 }
    591 
    592 /**
    593  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
    594  * @dev: drm device
    595  */
    596 static void i915_enable_asle_pipestat(struct drm_device *dev)
    597 {
    598 	struct drm_i915_private *dev_priv = dev->dev_private;
    599 
    600 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
    601 		return;
    602 
    603 	spin_lock_irq(&dev_priv->irq_lock);
    604 
    605 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
    606 	if (INTEL_INFO(dev)->gen >= 4)
    607 		i915_enable_pipestat(dev_priv, PIPE_A,
    608 				     PIPE_LEGACY_BLC_EVENT_STATUS);
    609 
    610 	spin_unlock_irq(&dev_priv->irq_lock);
    611 }
    612 
    613 /*
    614  * This timing diagram depicts the video signal in and
    615  * around the vertical blanking period.
    616  *
    617  * Assumptions about the fictitious mode used in this example:
    618  *  vblank_start >= 3
    619  *  vsync_start = vblank_start + 1
    620  *  vsync_end = vblank_start + 2
    621  *  vtotal = vblank_start + 3
    622  *
    623  *           start of vblank:
    624  *           latch double buffered registers
    625  *           increment frame counter (ctg+)
    626  *           generate start of vblank interrupt (gen4+)
    627  *           |
    628  *           |          frame start:
    629  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
    630  *           |          may be shifted forward 1-3 extra lines via PIPECONF
    631  *           |          |
    632  *           |          |  start of vsync:
    633  *           |          |  generate vsync interrupt
    634  *           |          |  |
    635  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
    636  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
    637  * ----va---> <-----------------vb--------------------> <--------va-------------
    638  *       |          |       <----vs----->                     |
    639  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
    640  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
    641  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
    642  *       |          |                                         |
    643  *       last visible pixel                                   first visible pixel
    644  *                  |                                         increment frame counter (gen3/4)
    645  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
    646  *
    647  * x  = horizontal active
    648  * _  = horizontal blanking
    649  * hs = horizontal sync
    650  * va = vertical active
    651  * vb = vertical blanking
    652  * vs = vertical sync
    653  * vbs = vblank_start (number)
    654  *
    655  * Summary:
    656  * - most events happen at the start of horizontal sync
    657  * - frame start happens at the start of horizontal blank, 1-4 lines
    658  *   (depending on PIPECONF settings) after the start of vblank
    659  * - gen3/4 pixel and frame counter are synchronized with the start
    660  *   of horizontal active on the first line of vertical active
    661  */
    662 
    663 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
    664 {
    665 	/* Gen2 doesn't have a hardware frame counter */
    666 	return 0;
    667 }
    668 
    669 /* Called from drm generic code, passed a 'crtc', which
    670  * we use as a pipe index
    671  */
    672 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
    673 {
    674 	struct drm_i915_private *dev_priv = dev->dev_private;
    675 	unsigned long high_frame;
    676 	unsigned long low_frame;
    677 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
    678 	struct intel_crtc *intel_crtc =
    679 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
    680 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
    681 
    682 	htotal = mode->crtc_htotal;
    683 	hsync_start = mode->crtc_hsync_start;
    684 	vbl_start = mode->crtc_vblank_start;
    685 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
    686 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
    687 
    688 	/* Convert to pixel count */
    689 	vbl_start *= htotal;
    690 
    691 	/* Start of vblank event occurs at start of hsync */
    692 	vbl_start -= htotal - hsync_start;
    693 
    694 	high_frame = PIPEFRAME(pipe);
    695 	low_frame = PIPEFRAMEPIXEL(pipe);
    696 
    697 	/*
    698 	 * High & low register fields aren't synchronized, so make sure
    699 	 * we get a low value that's stable across two reads of the high
    700 	 * register.
    701 	 */
    702 	do {
    703 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
    704 		low   = I915_READ(low_frame);
    705 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
    706 	} while (high1 != high2);
    707 
    708 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
    709 	pixel = low & PIPE_PIXEL_MASK;
    710 	low >>= PIPE_FRAME_LOW_SHIFT;
    711 
    712 	/*
    713 	 * The frame counter increments at beginning of active.
    714 	 * Cook up a vblank counter by also checking the pixel
    715 	 * counter against vblank start.
    716 	 */
    717 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
    718 }
    719 
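        /*
         * Illustrative example for the fixup above (numbers are made up):
         * with htotal = 800, hsync_start = 656 and vblank_start = 600 lines,
         * vbl_start becomes 600 * 800 - (800 - 656) pixels.  The hardware
         * frame counter only increments at the start of the next active
         * period, so if the pixel counter has already reached vbl_start we
         * are logically in the following vblank and report one frame more.
         */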
    720 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
    721 {
    722 	struct drm_i915_private *dev_priv = dev->dev_private;
    723 
    724 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
    725 }
    726 
    727 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
    728 #ifdef __NetBSD__
    729 #define	__raw_i915_read32(dev_priv, reg) bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg))
    730 #else
    731 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
    732 #endif
    733 
    734 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
    735 {
    736 	struct drm_device *dev = crtc->base.dev;
    737 	struct drm_i915_private *dev_priv = dev->dev_private;
    738 	const struct drm_display_mode *mode = &crtc->base.hwmode;
    739 	enum pipe pipe = crtc->pipe;
    740 	int position, vtotal;
    741 
    742 	vtotal = mode->crtc_vtotal;
    743 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
    744 		vtotal /= 2;
    745 
    746 	if (IS_GEN2(dev))
    747 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
    748 	else
    749 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
    750 
    751 	/*
    752 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
    753 	 * read it just before the start of vblank.  So try it again
    754 	 * so we don't accidentally end up spanning a vblank frame
    755 	 * increment, causing the pipe_update_end() code to squawk at us.
    756 	 *
    757 	 * The nature of this problem means we can't simply check the ISR
    758 	 * bit and return the vblank start value; nor can we use the scanline
    759 	 * debug register in the transcoder as it appears to have the same
    760 	 * problem.  We may need to extend this to include other platforms,
    761 	 * but so far testing only shows the problem on HSW.
    762 	 */
    763 	if (HAS_DDI(dev) && !position) {
    764 		int i, temp;
    765 
    766 		for (i = 0; i < 100; i++) {
    767 			udelay(1);
    768 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
    769 				DSL_LINEMASK_GEN3;
    770 			if (temp != position) {
    771 				position = temp;
    772 				break;
    773 			}
    774 		}
    775 	}
    776 
    777 	/*
    778 	 * See update_scanline_offset() for the details on the
    779 	 * scanline_offset adjustment.
    780 	 */
    781 	return (position + crtc->scanline_offset) % vtotal;
    782 }
    783 
    784 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
    785 				    unsigned int flags, int *vpos, int *hpos,
    786 				    ktime_t *stime, ktime_t *etime,
    787 				    const struct drm_display_mode *mode)
    788 {
    789 	struct drm_i915_private *dev_priv = dev->dev_private;
    790 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
    791 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    792 	int position;
    793 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
    794 	bool in_vbl = true;
    795 	int ret = 0;
    796 	unsigned long irqflags;
    797 
    798 	if (WARN_ON(!mode->crtc_clock)) {
    799 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
    800 				 "pipe %c\n", pipe_name(pipe));
    801 		return 0;
    802 	}
    803 
    804 	htotal = mode->crtc_htotal;
    805 	hsync_start = mode->crtc_hsync_start;
    806 	vtotal = mode->crtc_vtotal;
    807 	vbl_start = mode->crtc_vblank_start;
    808 	vbl_end = mode->crtc_vblank_end;
    809 
    810 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
    811 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
    812 		vbl_end /= 2;
    813 		vtotal /= 2;
    814 	}
    815 
    816 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
    817 
    818 	/*
    819 	 * Lock uncore.lock, as we will do multiple timing critical raw
    820 	 * register reads, potentially with preemption disabled, so the
    821 	 * following code must not block on uncore.lock.
    822 	 */
    823 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
    824 
    825 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
    826 
    827 	/* Get optional system timestamp before query. */
    828 	if (stime)
    829 		*stime = ktime_get();
    830 
    831 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
    832 		/* No obvious pixelcount register. Only query vertical
    833 		 * scanout position from Display scan line register.
    834 		 */
    835 		position = __intel_get_crtc_scanline(intel_crtc);
    836 	} else {
    837 		/* Have access to pixelcount since start of frame.
    838 		 * We can split this into vertical and horizontal
    839 		 * scanout position.
    840 		 */
    841 		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
    842 
    843 		/* convert to pixel counts */
    844 		vbl_start *= htotal;
    845 		vbl_end *= htotal;
    846 		vtotal *= htotal;
    847 
    848 		/*
    849 		 * In interlaced modes, the pixel counter counts all pixels,
    850 		 * so one field will have htotal more pixels. To prevent the
    851 		 * reported position from jumping backwards when the pixel
    852 		 * counter is beyond the length of the shorter field, just
    853 		 * clamp the position to the length of the shorter field. This
    854 		 * matches how the scanline counter based position works since
    855 		 * the scanline counter doesn't count the two half lines.
    856 		 */
    857 		if (position >= vtotal)
    858 			position = vtotal - 1;
    859 
    860 		/*
    861 		 * Start of vblank interrupt is triggered at start of hsync,
    862 		 * just prior to the first active line of vblank. However we
    863 		 * consider lines to start at the leading edge of horizontal
    864 		 * active. So, should we get here before we've crossed into
    865 		 * the horizontal active of the first line in vblank, we would
    866 		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
    867 		 * always add htotal-hsync_start to the current pixel position.
    868 		 */
    869 		position = (position + htotal - hsync_start) % vtotal;
    870 	}
    871 
    872 	/* Get optional system timestamp after query. */
    873 	if (etime)
    874 		*etime = ktime_get();
    875 
    876 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
    877 
    878 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
    879 
    880 	in_vbl = position >= vbl_start && position < vbl_end;
    881 
    882 	/*
    883 	 * While in vblank, position will be negative
    884 	 * counting up towards 0 at vbl_end. And outside
    885 	 * vblank, position will be positive counting
    886 	 * up from vbl_end.
    887 	 */
    888 	if (position >= vbl_start)
    889 		position -= vbl_end;
    890 	else
    891 		position += vtotal - vbl_end;
    892 
    893 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
    894 		*vpos = position;
    895 		*hpos = 0;
    896 	} else {
    897 		*vpos = position / htotal;
    898 		*hpos = position - (*vpos * htotal);
    899 	}
    900 
    901 	/* In vblank? */
    902 	if (in_vbl)
    903 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
    904 
    905 	return ret;
    906 }
    907 
    908 int intel_get_crtc_scanline(struct intel_crtc *crtc)
    909 {
    910 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
    911 	unsigned long irqflags;
    912 	int position;
    913 
    914 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
    915 	position = __intel_get_crtc_scanline(crtc);
    916 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
    917 
    918 	return position;
    919 }
    920 
    921 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
    922 			      int *max_error,
    923 			      struct timeval *vblank_time,
    924 			      unsigned flags)
    925 {
    926 	struct drm_crtc *crtc;
    927 
    928 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
    929 		DRM_ERROR("Invalid crtc %u\n", pipe);
    930 		return -EINVAL;
    931 	}
    932 
    933 	/* Get drm_crtc to timestamp: */
    934 	crtc = intel_get_crtc_for_pipe(dev, pipe);
    935 	if (crtc == NULL) {
    936 		DRM_ERROR("Invalid crtc %u\n", pipe);
    937 		return -EINVAL;
    938 	}
    939 
    940 	if (!crtc->hwmode.crtc_clock) {
    941 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
    942 		return -EBUSY;
    943 	}
    944 
    945 	/* Helper routine in DRM core does all the work: */
    946 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
    947 						     vblank_time, flags,
    948 						     &crtc->hwmode);
    949 }
    950 
    951 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
    952 {
    953 	struct drm_i915_private *dev_priv = dev->dev_private;
    954 	u32 busy_up, busy_down, max_avg, min_avg;
    955 	u8 new_delay;
    956 
    957 	spin_lock(&mchdev_lock);
    958 
    959 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
    960 
    961 	new_delay = dev_priv->ips.cur_delay;
    962 
    963 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
    964 	busy_up = I915_READ(RCPREVBSYTUPAVG);
    965 	busy_down = I915_READ(RCPREVBSYTDNAVG);
    966 	max_avg = I915_READ(RCBMAXAVG);
    967 	min_avg = I915_READ(RCBMINAVG);
    968 
    969 	/* Handle RCS change request from hw */
    970 	if (busy_up > max_avg) {
    971 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
    972 			new_delay = dev_priv->ips.cur_delay - 1;
    973 		if (new_delay < dev_priv->ips.max_delay)
    974 			new_delay = dev_priv->ips.max_delay;
    975 	} else if (busy_down < min_avg) {
    976 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
    977 			new_delay = dev_priv->ips.cur_delay + 1;
    978 		if (new_delay > dev_priv->ips.min_delay)
    979 			new_delay = dev_priv->ips.min_delay;
    980 	}
    981 
    982 	if (ironlake_set_drps(dev, new_delay))
    983 		dev_priv->ips.cur_delay = new_delay;
    984 
    985 	spin_unlock(&mchdev_lock);
    986 
    987 	return;
    988 }
    989 
    990 static void notify_ring(struct intel_engine_cs *ring)
    991 {
    992 #ifdef __NetBSD__
    993 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
    994 	unsigned long flags;
    995 #endif
    996 
    997 	if (!intel_ring_initialized(ring))
    998 		return;
    999 
   1000 	trace_i915_gem_request_notify(ring);
   1001 
   1002 #ifdef __NetBSD__
   1003 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
   1004 	DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
   1005 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
   1006 #else
   1007 	wake_up_all(&ring->irq_queue);
   1008 #endif
   1009 }
   1010 
   1011 static void vlv_c0_read(struct drm_i915_private *dev_priv,
   1012 			struct intel_rps_ei *ei)
   1013 {
   1014 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
   1015 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
   1016 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
   1017 }
   1018 
   1019 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
   1020 {
   1021 	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
   1022 }
   1023 
   1024 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
   1025 {
   1026 	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
   1027 	struct intel_rps_ei now;
   1028 	u32 events = 0;
   1029 
   1030 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
   1031 		return 0;
   1032 
   1033 	vlv_c0_read(dev_priv, &now);
   1034 	if (now.cz_clock == 0)
   1035 		return 0;
   1036 
   1037 	if (prev->cz_clock) {
   1038 		u64 time, c0;
   1039 		unsigned int mul;
   1040 
   1041 		mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
   1042 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
   1043 			mul <<= 8;
   1044 
   1045 		time = now.cz_clock - prev->cz_clock;
   1046 		time *= dev_priv->czclk_freq;
   1047 
   1048 		/* Workload can be split between render + media,
   1049 		 * e.g. SwapBuffers being blitted in X after being rendered in
   1050 		 * mesa. To account for this we need to combine both engines
   1051 		 * into our activity counter.
   1052 		 */
   1053 		c0 = now.render_c0 - prev->render_c0;
   1054 		c0 += now.media_c0 - prev->media_c0;
   1055 		c0 *= mul;
   1056 
   1057 		if (c0 > time * dev_priv->rps.up_threshold)
   1058 			events = GEN6_PM_RP_UP_THRESHOLD;
   1059 		else if (c0 < time * dev_priv->rps.down_threshold)
   1060 			events = GEN6_PM_RP_DOWN_THRESHOLD;
   1061 	}
   1062 
   1063 	dev_priv->rps.ei = now;
   1064 	return events;
   1065 }
   1066 
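        /*
         * A rough reading of the arithmetic in vlv_wa_c0_ei(): c0 is the
         * combined render + media C0 residency accumulated since the last
         * EI interval, scaled by VLV_CZ_CLOCK_TO_MILLI_SEC * 100 so it can
         * be compared against the elapsed time multiplied by the up/down
         * thresholds (which are expressed as percentages).  Busier than
         * rps.up_threshold of the interval reports an up event, idler than
         * rps.down_threshold reports a down event.
         */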
   1067 static bool any_waiters(struct drm_i915_private *dev_priv)
   1068 {
   1069 	struct intel_engine_cs *ring;
   1070 	int i;
   1071 
   1072 	for_each_ring(ring, dev_priv, i)
   1073 		if (ring->irq_refcount)
   1074 			return true;
   1075 
   1076 	return false;
   1077 }
   1078 
   1079 static void gen6_pm_rps_work(struct work_struct *work)
   1080 {
   1081 	struct drm_i915_private *dev_priv =
   1082 		container_of(work, struct drm_i915_private, rps.work);
   1083 	bool client_boost;
   1084 	int new_delay, adj, min, max;
   1085 	u32 pm_iir;
   1086 
   1087 	spin_lock_irq(&dev_priv->irq_lock);
   1088 	/* Speed up work cancellation while disabling RPS interrupts. */
   1089 	if (!dev_priv->rps.interrupts_enabled) {
   1090 		spin_unlock_irq(&dev_priv->irq_lock);
   1091 		return;
   1092 	}
   1093 	pm_iir = dev_priv->rps.pm_iir;
   1094 	dev_priv->rps.pm_iir = 0;
   1095 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
   1096 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
   1097 	client_boost = dev_priv->rps.client_boost;
   1098 	dev_priv->rps.client_boost = false;
   1099 	spin_unlock_irq(&dev_priv->irq_lock);
   1100 
   1101 	/* Make sure we didn't queue anything we're not going to process. */
   1102 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
   1103 
   1104 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
   1105 		return;
   1106 
   1107 	mutex_lock(&dev_priv->rps.hw_lock);
   1108 
   1109 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
   1110 
   1111 	adj = dev_priv->rps.last_adj;
   1112 	new_delay = dev_priv->rps.cur_freq;
   1113 	min = dev_priv->rps.min_freq_softlimit;
   1114 	max = dev_priv->rps.max_freq_softlimit;
   1115 
   1116 	if (client_boost) {
   1117 		new_delay = dev_priv->rps.max_freq_softlimit;
   1118 		adj = 0;
   1119 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
   1120 		if (adj > 0)
   1121 			adj *= 2;
   1122 		else /* CHV needs even encode values */
   1123 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
   1124 		/*
   1125 		 * For better performance, jump directly
   1126 		 * to RPe if we're below it.
   1127 		 */
   1128 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
   1129 			new_delay = dev_priv->rps.efficient_freq;
   1130 			adj = 0;
   1131 		}
   1132 	} else if (any_waiters(dev_priv)) {
   1133 		adj = 0;
   1134 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
   1135 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
   1136 			new_delay = dev_priv->rps.efficient_freq;
   1137 		else
   1138 			new_delay = dev_priv->rps.min_freq_softlimit;
   1139 		adj = 0;
   1140 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
   1141 		if (adj < 0)
   1142 			adj *= 2;
   1143 		else /* CHV needs even encode values */
   1144 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
   1145 	} else { /* unknown event */
   1146 		adj = 0;
   1147 	}
   1148 
   1149 	dev_priv->rps.last_adj = adj;
   1150 
   1151 	/* sysfs frequency interfaces may have snuck in while servicing the
   1152 	 * interrupt
   1153 	 */
   1154 	new_delay += adj;
   1155 	new_delay = clamp_t(int, new_delay, min, max);
   1156 
   1157 	intel_set_rps(dev_priv->dev, new_delay);
   1158 
   1159 	mutex_unlock(&dev_priv->rps.hw_lock);
   1160 }
   1161 
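        /*
         * The last_adj bookkeeping in gen6_pm_rps_work() gives the governor a
         * simple momentum: consecutive up (or down) threshold events double
         * the step size, while a client boost, an active waiter or an unknown
         * event resets it, and the resulting frequency is clamped to the
         * softlimits before being applied via intel_set_rps().
         */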
   1162 
   1163 /**
   1164  * ivybridge_parity_work - Workqueue called when a parity error interrupt
   1165  * occurred.
   1166  * @work: workqueue struct
   1167  *
   1168  * Doesn't actually do anything except notify userspace. As a consequence of
   1169  * this event, userspace should try to remap the bad rows, since
   1170  * statistically the same row is likely to go bad again.
   1171  */
   1172 static void ivybridge_parity_work(struct work_struct *work)
   1173 {
   1174 	struct drm_i915_private *dev_priv =
   1175 		container_of(work, struct drm_i915_private, l3_parity.error_work);
   1176 	u32 error_status, row, bank, subbank;
   1177 #ifndef __NetBSD__		/* XXX kobject uevent...? */
   1178 	char *parity_event[6];
   1179 #endif
   1180 	uint32_t misccpctl;
   1181 	uint8_t slice = 0;
   1182 
   1183 	/* We must turn off DOP level clock gating to access the L3 registers.
   1184 	 * In order to prevent a get/put style interface, acquire struct mutex
   1185 	 * any time we access those registers.
   1186 	 */
   1187 	mutex_lock(&dev_priv->dev->struct_mutex);
   1188 
   1189 	/* If we've screwed up tracking, just let the interrupt fire again */
   1190 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
   1191 		goto out;
   1192 
   1193 	misccpctl = I915_READ(GEN7_MISCCPCTL);
   1194 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
   1195 	POSTING_READ(GEN7_MISCCPCTL);
   1196 
   1197 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
   1198 		u32 reg;
   1199 
   1200 		slice--;
   1201 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
   1202 			break;
   1203 
   1204 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
   1205 
   1206 		reg = GEN7_L3CDERRST1 + (slice * 0x200);
   1207 
   1208 		error_status = I915_READ(reg);
   1209 		row = GEN7_PARITY_ERROR_ROW(error_status);
   1210 		bank = GEN7_PARITY_ERROR_BANK(error_status);
   1211 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
   1212 
   1213 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
   1214 		POSTING_READ(reg);
   1215 
   1216 #ifndef __NetBSD__		/* XXX kobject uevent...? */
   1217 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
   1218 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
   1219 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
   1220 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
   1221 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
   1222 		parity_event[5] = NULL;
   1223 
   1224 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
   1225 				   KOBJ_CHANGE, parity_event);
   1226 #endif
   1227 
   1228 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
   1229 			  slice, row, bank, subbank);
   1230 
   1231 #ifndef __NetBSD__		/* XXX kobject uevent...? */
   1232 		kfree(parity_event[4]);
   1233 		kfree(parity_event[3]);
   1234 		kfree(parity_event[2]);
   1235 		kfree(parity_event[1]);
   1236 #endif
   1237 	}
   1238 
   1239 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
   1240 
   1241 out:
   1242 	WARN_ON(dev_priv->l3_parity.which_slice);
   1243 	spin_lock_irq(&dev_priv->irq_lock);
   1244 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
   1245 	spin_unlock_irq(&dev_priv->irq_lock);
   1246 
   1247 	mutex_unlock(&dev_priv->dev->struct_mutex);
   1248 }
   1249 
   1250 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
   1251 {
   1252 	struct drm_i915_private *dev_priv = dev->dev_private;
   1253 
   1254 	if (!HAS_L3_DPF(dev))
   1255 		return;
   1256 
   1257 	spin_lock(&dev_priv->irq_lock);
   1258 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
   1259 	spin_unlock(&dev_priv->irq_lock);
   1260 
   1261 	iir &= GT_PARITY_ERROR(dev);
   1262 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
   1263 		dev_priv->l3_parity.which_slice |= 1 << 1;
   1264 
   1265 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
   1266 		dev_priv->l3_parity.which_slice |= 1 << 0;
   1267 
   1268 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
   1269 }
   1270 
   1271 static void ilk_gt_irq_handler(struct drm_device *dev,
   1272 			       struct drm_i915_private *dev_priv,
   1273 			       u32 gt_iir)
   1274 {
   1275 	if (gt_iir &
   1276 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
   1277 		notify_ring(&dev_priv->ring[RCS]);
   1278 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
   1279 		notify_ring(&dev_priv->ring[VCS]);
   1280 }
   1281 
   1282 static void snb_gt_irq_handler(struct drm_device *dev,
   1283 			       struct drm_i915_private *dev_priv,
   1284 			       u32 gt_iir)
   1285 {
   1286 
   1287 	if (gt_iir &
   1288 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
   1289 		notify_ring(&dev_priv->ring[RCS]);
   1290 	if (gt_iir & GT_BSD_USER_INTERRUPT)
   1291 		notify_ring(&dev_priv->ring[VCS]);
   1292 	if (gt_iir & GT_BLT_USER_INTERRUPT)
   1293 		notify_ring(&dev_priv->ring[BCS]);
   1294 
   1295 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
   1296 		      GT_BSD_CS_ERROR_INTERRUPT |
   1297 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
   1298 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
   1299 
   1300 	if (gt_iir & GT_PARITY_ERROR(dev))
   1301 		ivybridge_parity_error_irq_handler(dev, gt_iir);
   1302 }
   1303 
   1304 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
   1305 				       u32 master_ctl)
   1306 {
   1307 	irqreturn_t ret = IRQ_NONE;
   1308 
   1309 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
   1310 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
   1311 		if (tmp) {
   1312 			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
   1313 			ret = IRQ_HANDLED;
   1314 
   1315 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
   1316 				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
   1317 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
   1318 				notify_ring(&dev_priv->ring[RCS]);
   1319 
   1320 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
   1321 				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
   1322 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
   1323 				notify_ring(&dev_priv->ring[BCS]);
   1324 		} else
   1325 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
   1326 	}
   1327 
   1328 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
   1329 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
   1330 		if (tmp) {
   1331 			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
   1332 			ret = IRQ_HANDLED;
   1333 
   1334 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
   1335 				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
   1336 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
   1337 				notify_ring(&dev_priv->ring[VCS]);
   1338 
   1339 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
   1340 				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
   1341 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
   1342 				notify_ring(&dev_priv->ring[VCS2]);
   1343 		} else
   1344 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
   1345 	}
   1346 
   1347 	if (master_ctl & GEN8_GT_VECS_IRQ) {
   1348 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
   1349 		if (tmp) {
   1350 			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
   1351 			ret = IRQ_HANDLED;
   1352 
   1353 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
   1354 				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
   1355 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
   1356 				notify_ring(&dev_priv->ring[VECS]);
   1357 		} else
   1358 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
   1359 	}
   1360 
   1361 	if (master_ctl & GEN8_GT_PM_IRQ) {
   1362 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
   1363 		if (tmp & dev_priv->pm_rps_events) {
   1364 			I915_WRITE_FW(GEN8_GT_IIR(2),
   1365 				      tmp & dev_priv->pm_rps_events);
   1366 			ret = IRQ_HANDLED;
   1367 			gen6_rps_irq_handler(dev_priv, tmp);
   1368 		} else
   1369 			DRM_ERROR("The master control interrupt lied (PM)!\n");
   1370 	}
   1371 
   1372 	return ret;
   1373 }
   1374 
   1375 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
   1376 {
   1377 	switch (port) {
   1378 	case PORT_A:
   1379 		return val & PORTA_HOTPLUG_LONG_DETECT;
   1380 	case PORT_B:
   1381 		return val & PORTB_HOTPLUG_LONG_DETECT;
   1382 	case PORT_C:
   1383 		return val & PORTC_HOTPLUG_LONG_DETECT;
   1384 	default:
   1385 		return false;
   1386 	}
   1387 }
   1388 
   1389 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
   1390 {
   1391 	switch (port) {
   1392 	case PORT_E:
   1393 		return val & PORTE_HOTPLUG_LONG_DETECT;
   1394 	default:
   1395 		return false;
   1396 	}
   1397 }
   1398 
   1399 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
   1400 {
   1401 	switch (port) {
   1402 	case PORT_A:
   1403 		return val & PORTA_HOTPLUG_LONG_DETECT;
   1404 	case PORT_B:
   1405 		return val & PORTB_HOTPLUG_LONG_DETECT;
   1406 	case PORT_C:
   1407 		return val & PORTC_HOTPLUG_LONG_DETECT;
   1408 	case PORT_D:
   1409 		return val & PORTD_HOTPLUG_LONG_DETECT;
   1410 	default:
   1411 		return false;
   1412 	}
   1413 }
   1414 
   1415 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
   1416 {
   1417 	switch (port) {
   1418 	case PORT_A:
   1419 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
   1420 	default:
   1421 		return false;
   1422 	}
   1423 }
   1424 
   1425 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
   1426 {
   1427 	switch (port) {
   1428 	case PORT_B:
   1429 		return val & PORTB_HOTPLUG_LONG_DETECT;
   1430 	case PORT_C:
   1431 		return val & PORTC_HOTPLUG_LONG_DETECT;
   1432 	case PORT_D:
   1433 		return val & PORTD_HOTPLUG_LONG_DETECT;
   1434 	default:
   1435 		return false;
   1436 	}
   1437 }
   1438 
   1439 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
   1440 {
   1441 	switch (port) {
   1442 	case PORT_B:
   1443 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
   1444 	case PORT_C:
   1445 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
   1446 	case PORT_D:
   1447 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
   1448 	default:
   1449 		return false;
   1450 	}
   1451 }
   1452 
   1453 /*
   1454  * Get a bit mask of pins that have triggered, and which ones may be long.
   1455  * This can be called multiple times with the same masks to accumulate
   1456  * hotplug detection results from several registers.
   1457  *
   1458  * Note that the caller is expected to zero out the masks initially.
   1459  */
   1460 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
   1461 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
   1462 			     const u32 hpd[HPD_NUM_PINS],
   1463 			     bool long_pulse_detect(enum port port, u32 val))
   1464 {
   1465 	enum port port;
   1466 	int i;
   1467 
   1468 	for_each_hpd_pin(i) {
   1469 		if ((hpd[i] & hotplug_trigger) == 0)
   1470 			continue;
   1471 
   1472 		*pin_mask |= BIT(i);
   1473 
   1474 		if (!intel_hpd_pin_to_port(i, &port))
   1475 			continue;
   1476 
   1477 		if (long_pulse_detect(port, dig_hotplug_reg))
   1478 			*long_mask |= BIT(i);
   1479 	}
   1480 
   1481 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
   1482 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
   1483 
   1484 }
   1485 
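        /*
         * Sketch of how intel_get_hpd_pins() is meant to be used (the real
         * per-platform callers appear further down in this file): the IRQ
         * handler reads and acks its hotplug trigger/digital hotplug
         * registers, zeroes pin_mask and long_mask, and then calls e.g.
         *
         *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
         *			   dig_hotplug_reg, hpd_ibx,
         *			   pch_port_hotplug_long_detect);
         *
         * before handing both masks to the hotplug core for processing.
         */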
   1486 static void gmbus_irq_handler(struct drm_device *dev)
   1487 {
   1488 	struct drm_i915_private *dev_priv = dev->dev_private;
   1489 
   1490 #ifdef __NetBSD__
   1491 	spin_lock(&dev_priv->gmbus_wait_lock);
   1492 	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
   1493 	    &dev_priv->gmbus_wait_lock);
   1494 	spin_unlock(&dev_priv->gmbus_wait_lock);
   1495 #else
   1496 	wake_up_all(&dev_priv->gmbus_wait_queue);
   1497 #endif
   1498 }
   1499 
   1500 static void dp_aux_irq_handler(struct drm_device *dev)
   1501 {
   1502 	struct drm_i915_private *dev_priv = dev->dev_private;
   1503 
   1504 #ifdef __NetBSD__
   1505 	spin_lock(&dev_priv->gmbus_wait_lock);
   1506 	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
   1507 	    &dev_priv->gmbus_wait_lock);
   1508 	spin_unlock(&dev_priv->gmbus_wait_lock);
   1509 #else
   1510 	wake_up_all(&dev_priv->gmbus_wait_queue);
   1511 #endif
   1512 }
   1513 
   1514 #if defined(CONFIG_DEBUG_FS)
   1515 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
   1516 					 uint32_t crc0, uint32_t crc1,
   1517 					 uint32_t crc2, uint32_t crc3,
   1518 					 uint32_t crc4)
   1519 {
   1520 	struct drm_i915_private *dev_priv = dev->dev_private;
   1521 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
   1522 	struct intel_pipe_crc_entry *entry;
   1523 	int head, tail;
   1524 
   1525 	spin_lock(&pipe_crc->lock);
   1526 
   1527 	if (!pipe_crc->entries) {
   1528 		spin_unlock(&pipe_crc->lock);
   1529 		DRM_DEBUG_KMS("spurious interrupt\n");
   1530 		return;
   1531 	}
   1532 
   1533 	head = pipe_crc->head;
   1534 	tail = pipe_crc->tail;
   1535 
   1536 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
   1537 		spin_unlock(&pipe_crc->lock);
   1538 		DRM_ERROR("CRC buffer overflowing\n");
   1539 		return;
   1540 	}
   1541 
   1542 	entry = &pipe_crc->entries[head];
   1543 
   1544 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
   1545 	entry->crc[0] = crc0;
   1546 	entry->crc[1] = crc1;
   1547 	entry->crc[2] = crc2;
   1548 	entry->crc[3] = crc3;
   1549 	entry->crc[4] = crc4;
   1550 
   1551 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
   1552 	pipe_crc->head = head;
   1553 
   1554 	spin_unlock(&pipe_crc->lock);
   1555 
   1556 	wake_up_interruptible(&pipe_crc->wq);
   1557 }
   1558 #else
   1559 static inline void
   1560 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
   1561 			     uint32_t crc0, uint32_t crc1,
   1562 			     uint32_t crc2, uint32_t crc3,
   1563 			     uint32_t crc4) {}
   1564 #endif
    1565 
   1567 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
   1568 {
   1569 	struct drm_i915_private *dev_priv = dev->dev_private;
   1570 
   1571 	display_pipe_crc_irq_handler(dev, pipe,
   1572 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
   1573 				     0, 0, 0, 0);
   1574 }
   1575 
   1576 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
   1577 {
   1578 	struct drm_i915_private *dev_priv = dev->dev_private;
   1579 
   1580 	display_pipe_crc_irq_handler(dev, pipe,
   1581 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
   1582 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
   1583 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
   1584 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
   1585 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
   1586 }
   1587 
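         /*
          * The pre-IVB CRC registers report separate red/green/blue results;
          * RES1 only exists on gen3+ and RES2 only on G4X/gen5+, so zero is
          * substituted where a register is absent.
          */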
   1588 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
   1589 {
   1590 	struct drm_i915_private *dev_priv = dev->dev_private;
   1591 	uint32_t res1, res2;
   1592 
   1593 	if (INTEL_INFO(dev)->gen >= 3)
   1594 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
   1595 	else
   1596 		res1 = 0;
   1597 
   1598 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
   1599 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
   1600 	else
   1601 		res2 = 0;
   1602 
   1603 	display_pipe_crc_irq_handler(dev, pipe,
   1604 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
   1605 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
   1606 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
   1607 				     res1, res2);
   1608 }
   1609 
   1610 /* The RPS events need forcewake, so we add them to a work queue and mask their
   1611  * IMR bits until the work is done. Other interrupts can be processed without
   1612  * the work queue. */
   1613 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
   1614 {
   1615 	if (pm_iir & dev_priv->pm_rps_events) {
   1616 		spin_lock(&dev_priv->irq_lock);
   1617 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
   1618 		if (dev_priv->rps.interrupts_enabled) {
   1619 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
   1620 			queue_work(dev_priv->wq, &dev_priv->rps.work);
   1621 		}
   1622 		spin_unlock(&dev_priv->irq_lock);
   1623 	}
   1624 
   1625 	if (INTEL_INFO(dev_priv)->gen >= 8)
   1626 		return;
   1627 
   1628 	if (HAS_VEBOX(dev_priv->dev)) {
   1629 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
   1630 			notify_ring(&dev_priv->ring[VECS]);
   1631 
   1632 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
   1633 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
   1634 	}
   1635 }
   1636 
   1637 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
   1638 {
   1639 	if (!drm_handle_vblank(dev, pipe))
   1640 		return false;
   1641 
   1642 	return true;
   1643 }
   1644 
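         /*
          * Read and clear the per-pipe PIPESTAT registers under irq_lock,
          * then dispatch vblank, page flip, CRC and FIFO underrun events
          * with the lock dropped.
          */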
   1645 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
   1646 {
   1647 	struct drm_i915_private *dev_priv = dev->dev_private;
   1648 	u32 pipe_stats[I915_MAX_PIPES] = { };
   1649 	int pipe;
   1650 
   1651 	spin_lock(&dev_priv->irq_lock);
   1652 	for_each_pipe(dev_priv, pipe) {
   1653 		int reg;
   1654 		u32 mask, iir_bit = 0;
   1655 
   1656 		/*
   1657 		 * PIPESTAT bits get signalled even when the interrupt is
   1658 		 * disabled with the mask bits, and some of the status bits do
   1659 		 * not generate interrupts at all (like the underrun bit). Hence
   1660 		 * we need to be careful that we only handle what we want to
   1661 		 * handle.
   1662 		 */
   1663 
    1664 		/* fifo underruns are filtered in the underrun handler. */
   1665 		mask = PIPE_FIFO_UNDERRUN_STATUS;
   1666 
   1667 		switch (pipe) {
   1668 		case PIPE_A:
   1669 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
   1670 			break;
   1671 		case PIPE_B:
   1672 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
   1673 			break;
   1674 		case PIPE_C:
   1675 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
   1676 			break;
   1677 		}
   1678 		if (iir & iir_bit)
   1679 			mask |= dev_priv->pipestat_irq_mask[pipe];
   1680 
   1681 		if (!mask)
   1682 			continue;
   1683 
   1684 		reg = PIPESTAT(pipe);
   1685 		mask |= PIPESTAT_INT_ENABLE_MASK;
   1686 		pipe_stats[pipe] = I915_READ(reg) & mask;
   1687 
   1688 		/*
   1689 		 * Clear the PIPE*STAT regs before the IIR
   1690 		 */
   1691 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
   1692 					PIPESTAT_INT_STATUS_MASK))
   1693 			I915_WRITE(reg, pipe_stats[pipe]);
   1694 	}
   1695 	spin_unlock(&dev_priv->irq_lock);
   1696 
   1697 	for_each_pipe(dev_priv, pipe) {
   1698 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
   1699 		    intel_pipe_handle_vblank(dev, pipe))
   1700 			intel_check_page_flip(dev, pipe);
   1701 
   1702 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
   1703 			intel_prepare_page_flip(dev, pipe);
   1704 			intel_finish_page_flip(dev, pipe);
   1705 		}
   1706 
   1707 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
   1708 			i9xx_pipe_crc_irq_handler(dev, pipe);
   1709 
   1710 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
   1711 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   1712 	}
   1713 
   1714 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
   1715 		gmbus_irq_handler(dev);
   1716 }
   1717 
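         /*
          * Decode PORT_HOTPLUG_STAT into HPD pin and long-pulse masks.  G4X
          * and VLV additionally report DP AUX completion through this
          * register.
          */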
   1718 static void i9xx_hpd_irq_handler(struct drm_device *dev)
   1719 {
   1720 	struct drm_i915_private *dev_priv = dev->dev_private;
   1721 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
   1722 	u32 pin_mask = 0, long_mask = 0;
   1723 
   1724 	if (!hotplug_status)
   1725 		return;
   1726 
   1727 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
   1728 	/*
   1729 	 * Make sure hotplug status is cleared before we clear IIR, or else we
   1730 	 * may miss hotplug events.
   1731 	 */
   1732 	POSTING_READ(PORT_HOTPLUG_STAT);
   1733 
   1734 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
   1735 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
   1736 
   1737 		if (hotplug_trigger) {
   1738 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
   1739 					   hotplug_trigger, hpd_status_g4x,
   1740 					   i9xx_port_hotplug_long_detect);
   1741 
   1742 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
   1743 		}
   1744 
   1745 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
   1746 			dp_aux_irq_handler(dev);
   1747 	} else {
   1748 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
   1749 
   1750 		if (hotplug_trigger) {
   1751 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
   1752 					   hotplug_trigger, hpd_status_i915,
   1753 					   i9xx_port_hotplug_long_detect);
   1754 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
   1755 		}
   1756 	}
   1757 }
   1758 
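         /*
          * Top-level VLV interrupt handler: loop until the GT, PM and
          * display IIR registers are all clear, acking each one before its
          * events are processed.
          */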
   1759 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
   1760 {
   1761 	struct drm_device *dev = arg;
   1762 	struct drm_i915_private *dev_priv = dev->dev_private;
   1763 	u32 iir, gt_iir, pm_iir;
   1764 	irqreturn_t ret = IRQ_NONE;
   1765 
   1766 	if (!intel_irqs_enabled(dev_priv))
   1767 		return IRQ_NONE;
   1768 
   1769 	while (true) {
   1770 		/* Find, clear, then process each source of interrupt */
   1771 
   1772 		gt_iir = I915_READ(GTIIR);
   1773 		if (gt_iir)
   1774 			I915_WRITE(GTIIR, gt_iir);
   1775 
   1776 		pm_iir = I915_READ(GEN6_PMIIR);
   1777 		if (pm_iir)
   1778 			I915_WRITE(GEN6_PMIIR, pm_iir);
   1779 
   1780 		iir = I915_READ(VLV_IIR);
   1781 		if (iir) {
   1782 			/* Consume port before clearing IIR or we'll miss events */
   1783 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
   1784 				i9xx_hpd_irq_handler(dev);
   1785 			I915_WRITE(VLV_IIR, iir);
   1786 		}
   1787 
   1788 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
   1789 			goto out;
   1790 
   1791 		ret = IRQ_HANDLED;
   1792 
   1793 		if (gt_iir)
   1794 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
   1795 		if (pm_iir)
   1796 			gen6_rps_irq_handler(dev_priv, pm_iir);
   1797 		/* Call regardless, as some status bits might not be
   1798 		 * signalled in iir */
   1799 		valleyview_pipestat_irq_handler(dev, iir);
   1800 	}
   1801 
   1802 out:
   1803 	return ret;
   1804 }
   1805 
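         /*
          * Top-level CHV interrupt handler: the master interrupt is disabled
          * while VLV_IIR, the GT interrupts and the pipe statuses are acked
          * and dispatched, then re-enabled and posted.
          */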
   1806 static irqreturn_t cherryview_irq_handler(DRM_IRQ_ARGS)
   1807 {
   1808 	struct drm_device *dev = arg;
   1809 	struct drm_i915_private *dev_priv = dev->dev_private;
   1810 	u32 master_ctl, iir;
   1811 	irqreturn_t ret = IRQ_NONE;
   1812 
   1813 	if (!intel_irqs_enabled(dev_priv))
   1814 		return IRQ_NONE;
   1815 
   1816 	for (;;) {
   1817 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
   1818 		iir = I915_READ(VLV_IIR);
   1819 
   1820 		if (master_ctl == 0 && iir == 0)
   1821 			break;
   1822 
   1823 		ret = IRQ_HANDLED;
   1824 
   1825 		I915_WRITE(GEN8_MASTER_IRQ, 0);
   1826 
   1827 		/* Find, clear, then process each source of interrupt */
   1828 
   1829 		if (iir) {
   1830 			/* Consume port before clearing IIR or we'll miss events */
   1831 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
   1832 				i9xx_hpd_irq_handler(dev);
   1833 			I915_WRITE(VLV_IIR, iir);
   1834 		}
   1835 
   1836 		gen8_gt_irq_handler(dev_priv, master_ctl);
   1837 
   1838 		/* Call regardless, as some status bits might not be
   1839 		 * signalled in iir */
   1840 		valleyview_pipestat_irq_handler(dev, iir);
   1841 
   1842 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
   1843 		POSTING_READ(GEN8_MASTER_IRQ);
   1844 	}
   1845 
   1846 	return ret;
   1847 }
   1848 
   1849 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
   1850 				const u32 hpd[HPD_NUM_PINS])
   1851 {
   1852 	struct drm_i915_private *dev_priv = to_i915(dev);
   1853 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
   1854 
   1855 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
   1856 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
   1857 
   1858 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
   1859 			   dig_hotplug_reg, hpd,
   1860 			   pch_port_hotplug_long_detect);
   1861 
   1862 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
   1863 }
   1864 
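         /*
          * South display engine (IBX PCH) dispatch: hotplug, DP AUX, GMBUS,
          * audio, FDI and transcoder CRC/underrun events.
          */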
   1865 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
   1866 {
   1867 	struct drm_i915_private *dev_priv = dev->dev_private;
   1868 	int pipe;
   1869 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
   1870 
   1871 	if (hotplug_trigger)
   1872 		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
   1873 
   1874 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
   1875 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
   1876 			       SDE_AUDIO_POWER_SHIFT);
    1877 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
   1878 				 port_name(port));
   1879 	}
   1880 
   1881 	if (pch_iir & SDE_AUX_MASK)
   1882 		dp_aux_irq_handler(dev);
   1883 
   1884 	if (pch_iir & SDE_GMBUS)
   1885 		gmbus_irq_handler(dev);
   1886 
   1887 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
   1888 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
   1889 
   1890 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
   1891 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
   1892 
   1893 	if (pch_iir & SDE_POISON)
   1894 		DRM_ERROR("PCH poison interrupt\n");
   1895 
   1896 	if (pch_iir & SDE_FDI_MASK)
   1897 		for_each_pipe(dev_priv, pipe)
   1898 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
   1899 					 pipe_name(pipe),
   1900 					 I915_READ(FDI_RX_IIR(pipe)));
   1901 
   1902 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
   1903 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
   1904 
   1905 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
   1906 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
   1907 
   1908 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
   1909 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
   1910 
   1911 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
   1912 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
   1913 }
   1914 
   1915 static void ivb_err_int_handler(struct drm_device *dev)
   1916 {
   1917 	struct drm_i915_private *dev_priv = dev->dev_private;
   1918 	u32 err_int = I915_READ(GEN7_ERR_INT);
   1919 	enum pipe pipe;
   1920 
   1921 	if (err_int & ERR_INT_POISON)
   1922 		DRM_ERROR("Poison interrupt\n");
   1923 
   1924 	for_each_pipe(dev_priv, pipe) {
   1925 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
   1926 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   1927 
   1928 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
   1929 			if (IS_IVYBRIDGE(dev))
   1930 				ivb_pipe_crc_irq_handler(dev, pipe);
   1931 			else
   1932 				hsw_pipe_crc_irq_handler(dev, pipe);
   1933 		}
   1934 	}
   1935 
   1936 	I915_WRITE(GEN7_ERR_INT, err_int);
   1937 }
   1938 
   1939 static void cpt_serr_int_handler(struct drm_device *dev)
   1940 {
   1941 	struct drm_i915_private *dev_priv = dev->dev_private;
   1942 	u32 serr_int = I915_READ(SERR_INT);
   1943 
   1944 	if (serr_int & SERR_INT_POISON)
   1945 		DRM_ERROR("PCH poison interrupt\n");
   1946 
   1947 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
   1948 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
   1949 
   1950 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
   1951 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
   1952 
   1953 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
   1954 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
   1955 
   1956 	I915_WRITE(SERR_INT, serr_int);
   1957 }
   1958 
   1959 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
   1960 {
   1961 	struct drm_i915_private *dev_priv = dev->dev_private;
   1962 	int pipe;
   1963 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
   1964 
   1965 	if (hotplug_trigger)
   1966 		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
   1967 
   1968 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
   1969 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
   1970 			       SDE_AUDIO_POWER_SHIFT_CPT);
   1971 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
   1972 				 port_name(port));
   1973 	}
   1974 
   1975 	if (pch_iir & SDE_AUX_MASK_CPT)
   1976 		dp_aux_irq_handler(dev);
   1977 
   1978 	if (pch_iir & SDE_GMBUS_CPT)
   1979 		gmbus_irq_handler(dev);
   1980 
   1981 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
   1982 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
   1983 
   1984 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
   1985 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
   1986 
   1987 	if (pch_iir & SDE_FDI_MASK_CPT)
   1988 		for_each_pipe(dev_priv, pipe)
   1989 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
   1990 					 pipe_name(pipe),
   1991 					 I915_READ(FDI_RX_IIR(pipe)));
   1992 
   1993 	if (pch_iir & SDE_ERROR_CPT)
   1994 		cpt_serr_int_handler(dev);
   1995 }
   1996 
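         /*
          * SPT splits hotplug status across PCH_PORT_HOTPLUG and
          * PCH_PORT_HOTPLUG2 (port E), so pin and long-pulse masks are
          * accumulated from both registers before a single HPD event is
          * dispatched.
          */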
   1997 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
   1998 {
   1999 	struct drm_i915_private *dev_priv = dev->dev_private;
   2000 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
   2001 		~SDE_PORTE_HOTPLUG_SPT;
   2002 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
   2003 	u32 pin_mask = 0, long_mask = 0;
   2004 
   2005 	if (hotplug_trigger) {
   2006 		u32 dig_hotplug_reg;
   2007 
   2008 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
   2009 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
   2010 
   2011 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
   2012 				   dig_hotplug_reg, hpd_spt,
   2013 				   spt_port_hotplug_long_detect);
   2014 	}
   2015 
   2016 	if (hotplug2_trigger) {
   2017 		u32 dig_hotplug_reg;
   2018 
   2019 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
   2020 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
   2021 
   2022 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
   2023 				   dig_hotplug_reg, hpd_spt,
   2024 				   spt_port_hotplug2_long_detect);
   2025 	}
   2026 
   2027 	if (pin_mask)
   2028 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
   2029 
   2030 	if (pch_iir & SDE_GMBUS_CPT)
   2031 		gmbus_irq_handler(dev);
   2032 }
   2033 
   2034 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
   2035 				const u32 hpd[HPD_NUM_PINS])
   2036 {
   2037 	struct drm_i915_private *dev_priv = to_i915(dev);
   2038 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
   2039 
   2040 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
   2041 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
   2042 
   2043 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
   2044 			   dig_hotplug_reg, hpd,
   2045 			   ilk_port_hotplug_long_detect);
   2046 
   2047 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
   2048 }
   2049 
   2050 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
   2051 {
   2052 	struct drm_i915_private *dev_priv = dev->dev_private;
   2053 	enum pipe pipe;
   2054 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
   2055 
   2056 	if (hotplug_trigger)
   2057 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
   2058 
   2059 	if (de_iir & DE_AUX_CHANNEL_A)
   2060 		dp_aux_irq_handler(dev);
   2061 
   2062 	if (de_iir & DE_GSE)
   2063 		intel_opregion_asle_intr(dev);
   2064 
   2065 	if (de_iir & DE_POISON)
   2066 		DRM_ERROR("Poison interrupt\n");
   2067 
   2068 	for_each_pipe(dev_priv, pipe) {
   2069 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
   2070 		    intel_pipe_handle_vblank(dev, pipe))
   2071 			intel_check_page_flip(dev, pipe);
   2072 
   2073 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
   2074 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   2075 
   2076 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
   2077 			i9xx_pipe_crc_irq_handler(dev, pipe);
   2078 
   2079 		/* plane/pipes map 1:1 on ilk+ */
   2080 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
   2081 			intel_prepare_page_flip(dev, pipe);
   2082 			intel_finish_page_flip_plane(dev, pipe);
   2083 		}
   2084 	}
   2085 
   2086 	/* check event from PCH */
   2087 	if (de_iir & DE_PCH_EVENT) {
   2088 		u32 pch_iir = I915_READ(SDEIIR);
   2089 
   2090 		if (HAS_PCH_CPT(dev))
   2091 			cpt_irq_handler(dev, pch_iir);
   2092 		else
   2093 			ibx_irq_handler(dev, pch_iir);
   2094 
    2095 		/* should clear PCH hotplug event before clearing CPU irq */
   2096 		I915_WRITE(SDEIIR, pch_iir);
   2097 	}
   2098 
   2099 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
   2100 		ironlake_rps_change_irq_handler(dev);
   2101 }
   2102 
   2103 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
   2104 {
   2105 	struct drm_i915_private *dev_priv = dev->dev_private;
   2106 	enum pipe pipe;
   2107 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
   2108 
   2109 	if (hotplug_trigger)
   2110 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
   2111 
   2112 	if (de_iir & DE_ERR_INT_IVB)
   2113 		ivb_err_int_handler(dev);
   2114 
   2115 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
   2116 		dp_aux_irq_handler(dev);
   2117 
   2118 	if (de_iir & DE_GSE_IVB)
   2119 		intel_opregion_asle_intr(dev);
   2120 
   2121 	for_each_pipe(dev_priv, pipe) {
   2122 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
   2123 		    intel_pipe_handle_vblank(dev, pipe))
   2124 			intel_check_page_flip(dev, pipe);
   2125 
   2126 		/* plane/pipes map 1:1 on ilk+ */
   2127 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
   2128 			intel_prepare_page_flip(dev, pipe);
   2129 			intel_finish_page_flip_plane(dev, pipe);
   2130 		}
   2131 	}
   2132 
   2133 	/* check event from PCH */
   2134 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
   2135 		u32 pch_iir = I915_READ(SDEIIR);
   2136 
   2137 		cpt_irq_handler(dev, pch_iir);
   2138 
    2139 		/* clear PCH hotplug event before clearing CPU irq */
   2140 		I915_WRITE(SDEIIR, pch_iir);
   2141 	}
   2142 }
   2143 
   2144 /*
   2145  * To handle irqs with the minimum potential races with fresh interrupts, we:
   2146  * 1 - Disable Master Interrupt Control.
   2147  * 2 - Find the source(s) of the interrupt.
   2148  * 3 - Clear the Interrupt Identity bits (IIR).
   2149  * 4 - Process the interrupt(s) that had bits set in the IIRs.
   2150  * 5 - Re-enable Master Interrupt Control.
   2151  */
   2152 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
   2153 {
   2154 	struct drm_device *dev = arg;
   2155 	struct drm_i915_private *dev_priv = dev->dev_private;
   2156 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
   2157 	irqreturn_t ret = IRQ_NONE;
   2158 
   2159 	if (!intel_irqs_enabled(dev_priv))
   2160 		return IRQ_NONE;
   2161 
   2162 	/* We get interrupts on unclaimed registers, so check for this before we
   2163 	 * do any I915_{READ,WRITE}. */
   2164 	intel_uncore_check_errors(dev);
   2165 
   2166 	/* disable master interrupt before clearing iir  */
   2167 	de_ier = I915_READ(DEIER);
   2168 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
   2169 	POSTING_READ(DEIER);
   2170 
   2171 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
    2172  * interrupts will be stored on its back queue, and then we'll be
   2173 	 * able to process them after we restore SDEIER (as soon as we restore
   2174 	 * it, we'll get an interrupt if SDEIIR still has something to process
   2175 	 * due to its back queue). */
   2176 	if (!HAS_PCH_NOP(dev)) {
   2177 		sde_ier = I915_READ(SDEIER);
   2178 		I915_WRITE(SDEIER, 0);
   2179 		POSTING_READ(SDEIER);
   2180 	}
   2181 
   2182 	/* Find, clear, then process each source of interrupt */
   2183 
   2184 	gt_iir = I915_READ(GTIIR);
   2185 	if (gt_iir) {
   2186 		I915_WRITE(GTIIR, gt_iir);
   2187 		ret = IRQ_HANDLED;
   2188 		if (INTEL_INFO(dev)->gen >= 6)
   2189 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
   2190 		else
   2191 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
   2192 	}
   2193 
   2194 	de_iir = I915_READ(DEIIR);
   2195 	if (de_iir) {
   2196 		I915_WRITE(DEIIR, de_iir);
   2197 		ret = IRQ_HANDLED;
   2198 		if (INTEL_INFO(dev)->gen >= 7)
   2199 			ivb_display_irq_handler(dev, de_iir);
   2200 		else
   2201 			ilk_display_irq_handler(dev, de_iir);
   2202 	}
   2203 
   2204 	if (INTEL_INFO(dev)->gen >= 6) {
   2205 		u32 pm_iir = I915_READ(GEN6_PMIIR);
   2206 		if (pm_iir) {
   2207 			I915_WRITE(GEN6_PMIIR, pm_iir);
   2208 			ret = IRQ_HANDLED;
   2209 			gen6_rps_irq_handler(dev_priv, pm_iir);
   2210 		}
   2211 	}
   2212 
   2213 	I915_WRITE(DEIER, de_ier);
   2214 	POSTING_READ(DEIER);
   2215 	if (!HAS_PCH_NOP(dev)) {
   2216 		I915_WRITE(SDEIER, sde_ier);
   2217 		POSTING_READ(SDEIER);
   2218 	}
   2219 
   2220 	return ret;
   2221 }
   2222 
   2223 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
   2224 				const u32 hpd[HPD_NUM_PINS])
   2225 {
   2226 	struct drm_i915_private *dev_priv = to_i915(dev);
   2227 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
   2228 
   2229 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
   2230 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
   2231 
   2232 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
   2233 			   dig_hotplug_reg, hpd,
   2234 			   bxt_port_hotplug_long_detect);
   2235 
   2236 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
   2237 }
   2238 
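         /*
          * GEN8+ top-level handler: follows the same disable-master,
          * find/clear/process, re-enable-master sequence described above for
          * ironlake_irq_handler(), with per-pipe and PCH sub-dispatch.
          */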
   2239 static irqreturn_t gen8_irq_handler(DRM_IRQ_ARGS)
   2240 {
   2241 	struct drm_device *dev = arg;
   2242 	struct drm_i915_private *dev_priv = dev->dev_private;
   2243 	u32 master_ctl;
   2244 	irqreturn_t ret = IRQ_NONE;
   2245 	uint32_t tmp = 0;
   2246 	enum pipe pipe;
   2247 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
   2248 
   2249 	if (!intel_irqs_enabled(dev_priv))
   2250 		return IRQ_NONE;
   2251 
   2252 	if (INTEL_INFO(dev_priv)->gen >= 9)
   2253 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
   2254 			GEN9_AUX_CHANNEL_D;
   2255 
   2256 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
   2257 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
   2258 	if (!master_ctl)
   2259 		return IRQ_NONE;
   2260 
   2261 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
   2262 
   2263 	/* Find, clear, then process each source of interrupt */
   2264 
   2265 	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
   2266 
   2267 	if (master_ctl & GEN8_DE_MISC_IRQ) {
   2268 		tmp = I915_READ(GEN8_DE_MISC_IIR);
   2269 		if (tmp) {
   2270 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
   2271 			ret = IRQ_HANDLED;
   2272 			if (tmp & GEN8_DE_MISC_GSE)
   2273 				intel_opregion_asle_intr(dev);
   2274 			else
   2275 				DRM_ERROR("Unexpected DE Misc interrupt\n");
   2276 		}
   2277 		else
   2278 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
   2279 	}
   2280 
   2281 	if (master_ctl & GEN8_DE_PORT_IRQ) {
   2282 		tmp = I915_READ(GEN8_DE_PORT_IIR);
   2283 		if (tmp) {
   2284 			bool found = false;
   2285 			u32 hotplug_trigger = 0;
   2286 
   2287 			if (IS_BROXTON(dev_priv))
   2288 				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
   2289 			else if (IS_BROADWELL(dev_priv))
   2290 				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
   2291 
   2292 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
   2293 			ret = IRQ_HANDLED;
   2294 
   2295 			if (tmp & aux_mask) {
   2296 				dp_aux_irq_handler(dev);
   2297 				found = true;
   2298 			}
   2299 
   2300 			if (hotplug_trigger) {
   2301 				if (IS_BROXTON(dev))
   2302 					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
   2303 				else
   2304 					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
   2305 				found = true;
   2306 			}
   2307 
   2308 			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
   2309 				gmbus_irq_handler(dev);
   2310 				found = true;
   2311 			}
   2312 
   2313 			if (!found)
   2314 				DRM_ERROR("Unexpected DE Port interrupt\n");
   2315 		}
   2316 		else
   2317 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
   2318 	}
   2319 
   2320 	for_each_pipe(dev_priv, pipe) {
   2321 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
   2322 
   2323 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
   2324 			continue;
   2325 
   2326 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
   2327 		if (pipe_iir) {
   2328 			ret = IRQ_HANDLED;
   2329 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
   2330 
   2331 			if (pipe_iir & GEN8_PIPE_VBLANK &&
   2332 			    intel_pipe_handle_vblank(dev, pipe))
   2333 				intel_check_page_flip(dev, pipe);
   2334 
   2335 			if (INTEL_INFO(dev_priv)->gen >= 9)
   2336 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
   2337 			else
   2338 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
   2339 
   2340 			if (flip_done) {
   2341 				intel_prepare_page_flip(dev, pipe);
   2342 				intel_finish_page_flip_plane(dev, pipe);
   2343 			}
   2344 
   2345 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
   2346 				hsw_pipe_crc_irq_handler(dev, pipe);
   2347 
   2348 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
   2349 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
   2350 								    pipe);
    2351 
   2353 			if (INTEL_INFO(dev_priv)->gen >= 9)
   2354 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
   2355 			else
   2356 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
   2357 
   2358 			if (fault_errors)
    2359 				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
   2360 					  pipe_name(pipe),
   2361 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
   2362 		} else
   2363 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
   2364 	}
   2365 
   2366 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
   2367 	    master_ctl & GEN8_DE_PCH_IRQ) {
   2368 		/*
   2369 		 * FIXME(BDW): Assume for now that the new interrupt handling
   2370 		 * scheme also closed the SDE interrupt handling race we've seen
   2371 		 * on older pch-split platforms. But this needs testing.
   2372 		 */
   2373 		u32 pch_iir = I915_READ(SDEIIR);
   2374 		if (pch_iir) {
   2375 			I915_WRITE(SDEIIR, pch_iir);
   2376 			ret = IRQ_HANDLED;
   2377 
   2378 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
   2379 				spt_irq_handler(dev, pch_iir);
   2380 			else
   2381 				cpt_irq_handler(dev, pch_iir);
   2382 		} else {
   2383 			/*
   2384 			 * Like on previous PCH there seems to be something
   2385 			 * fishy going on with forwarding PCH interrupts.
   2386 			 */
   2387 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
   2388 		}
   2389 	}
   2390 
   2391 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
   2392 	POSTING_READ_FW(GEN8_MASTER_IRQ);
   2393 
   2394 	return ret;
   2395 }
   2396 
   2397 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
   2398 			       bool reset_completed)
   2399 {
   2400 	struct intel_engine_cs *ring;
   2401 	int i;
   2402 
   2403 	/*
   2404 	 * Notify all waiters for GPU completion events that reset state has
   2405 	 * been changed, and that they need to restart their wait after
   2406 	 * checking for potential errors (and bail out to drop locks if there is
    2407 	 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
   2408 	 */
   2409 
   2410 #ifdef __NetBSD__
   2411 	for_each_ring(ring, dev_priv, i) {
   2412 		spin_lock(&dev_priv->irq_lock);
   2413 		DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
   2414 		spin_unlock(&dev_priv->irq_lock);
   2415 	}
   2416 
   2417 	spin_lock(&dev_priv->pending_flip_lock);
   2418 	DRM_SPIN_WAKEUP_ALL(&dev_priv->pending_flip_queue,
   2419 	    &dev_priv->pending_flip_lock);
   2420 	spin_unlock(&dev_priv->pending_flip_lock);
   2421 
   2422 	if (reset_completed) {
   2423 		spin_lock(&dev_priv->gpu_error.reset_lock);
   2424 		DRM_SPIN_WAKEUP_ALL(&dev_priv->gpu_error.reset_queue,
   2425 		    &dev_priv->gpu_error.reset_lock);
   2426 		spin_unlock(&dev_priv->gpu_error.reset_lock);
   2427 	}
   2428 #else
   2429 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
   2430 	for_each_ring(ring, dev_priv, i)
   2431 		wake_up_all(&ring->irq_queue);
   2432 
   2433 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
   2434 	wake_up_all(&dev_priv->pending_flip_queue);
   2435 
   2436 	/*
   2437 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
   2438 	 * reset state is cleared.
   2439 	 */
   2440 	if (reset_completed)
   2441 		wake_up_all(&dev_priv->gpu_error.reset_queue);
   2442 #endif
   2443 }
   2444 
   2445 /**
   2446  * i915_reset_and_wakeup - do process context error handling work
   2447  * @dev: drm device
   2448  *
   2449  * Fire an error uevent so userspace can see that a hang or error
   2450  * was detected.
   2451  */
   2452 static void i915_reset_and_wakeup(struct drm_device *dev)
   2453 {
   2454 	struct drm_i915_private *dev_priv = to_i915(dev);
   2455 	struct i915_gpu_error *error = &dev_priv->gpu_error;
   2456 #ifndef __NetBSD__
   2457 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
   2458 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
   2459 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
   2460 #endif
   2461 	int ret;
   2462 
   2463 #ifndef __NetBSD__
   2464 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
   2465 #endif
   2466 
   2467 	/*
   2468 	 * Note that there's only one work item which does gpu resets, so we
   2469 	 * need not worry about concurrent gpu resets potentially incrementing
   2470 	 * error->reset_counter twice. We only need to take care of another
   2471 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
   2472 	 * quick check for that is good enough: schedule_work ensures the
   2473 	 * correct ordering between hang detection and this work item, and since
   2474 	 * the reset in-progress bit is only ever set by code outside of this
   2475 	 * work we don't need to worry about any other races.
   2476 	 */
   2477 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
   2478 		DRM_DEBUG_DRIVER("resetting chip\n");
   2479 #ifndef __NetBSD__
   2480 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
   2481 				   reset_event);
   2482 #endif
   2483 
   2484 		/*
   2485 		 * In most cases it's guaranteed that we get here with an RPM
   2486 		 * reference held, for example because there is a pending GPU
   2487 		 * request that won't finish until the reset is done. This
   2488 		 * isn't the case at least when we get here by doing a
    2489 		 * simulated reset via debugfs, so get an RPM reference.
   2490 		 */
   2491 		intel_runtime_pm_get(dev_priv);
   2492 
   2493 		intel_prepare_reset(dev);
   2494 
   2495 		/*
   2496 		 * All state reset _must_ be completed before we update the
   2497 		 * reset counter, for otherwise waiters might miss the reset
   2498 		 * pending state and not properly drop locks, resulting in
   2499 		 * deadlocks with the reset work.
   2500 		 */
   2501 		ret = i915_reset(dev);
   2502 
   2503 		intel_finish_reset(dev);
   2504 
   2505 		intel_runtime_pm_put(dev_priv);
   2506 
   2507 		if (ret == 0) {
   2508 			/*
   2509 			 * After all the gem state is reset, increment the reset
   2510 			 * counter and wake up everyone waiting for the reset to
   2511 			 * complete.
   2512 			 *
   2513 			 * Since unlock operations are a one-sided barrier only,
   2514 			 * we need to insert a barrier here to order any seqno
   2515 			 * updates before
   2516 			 * the counter increment.
   2517 			 */
   2518 			smp_mb__before_atomic();
   2519 			atomic_inc(&dev_priv->gpu_error.reset_counter);
   2520 
   2521 #ifndef __NetBSD__
   2522 			kobject_uevent_env(&dev->primary->kdev->kobj,
   2523 					   KOBJ_CHANGE, reset_done_event);
   2524 #endif
   2525 		} else {
   2526 			atomic_or(I915_WEDGED, &error->reset_counter);
   2527 		}
   2528 
   2529 		/*
   2530 		 * Note: The wake_up also serves as a memory barrier so that
    2531 		 * waiters see the updated value of the reset counter atomic_t.
   2532 		 */
   2533 		i915_error_wake_up(dev_priv, true);
   2534 	}
   2535 }
   2536 
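         /*
          * Dump the error identity register (EIR) and related state to the
          * log, then write EIR back to clear it.  Any bits that remain stuck
          * are masked in EMR so they stop generating interrupts.
          */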
   2537 static void i915_report_and_clear_eir(struct drm_device *dev)
   2538 {
   2539 	struct drm_i915_private *dev_priv = dev->dev_private;
   2540 	uint32_t instdone[I915_NUM_INSTDONE_REG];
   2541 	u32 eir = I915_READ(EIR);
   2542 	int pipe, i;
   2543 
   2544 	if (!eir)
   2545 		return;
   2546 
   2547 	pr_err("render error detected, EIR: 0x%08x\n", eir);
   2548 
   2549 	i915_get_extra_instdone(dev, instdone);
   2550 
   2551 	if (IS_G4X(dev)) {
   2552 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
   2553 			u32 ipeir = I915_READ(IPEIR_I965);
   2554 
   2555 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
   2556 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
   2557 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
   2558 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
   2559 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
   2560 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
   2561 			I915_WRITE(IPEIR_I965, ipeir);
   2562 			POSTING_READ(IPEIR_I965);
   2563 		}
   2564 		if (eir & GM45_ERROR_PAGE_TABLE) {
   2565 			u32 pgtbl_err = I915_READ(PGTBL_ER);
   2566 			pr_err("page table error\n");
   2567 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
   2568 			I915_WRITE(PGTBL_ER, pgtbl_err);
   2569 			POSTING_READ(PGTBL_ER);
   2570 		}
   2571 	}
   2572 
   2573 	if (!IS_GEN2(dev)) {
   2574 		if (eir & I915_ERROR_PAGE_TABLE) {
   2575 			u32 pgtbl_err = I915_READ(PGTBL_ER);
   2576 			pr_err("page table error\n");
   2577 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
   2578 			I915_WRITE(PGTBL_ER, pgtbl_err);
   2579 			POSTING_READ(PGTBL_ER);
   2580 		}
   2581 	}
   2582 
   2583 	if (eir & I915_ERROR_MEMORY_REFRESH) {
   2584 		pr_err("memory refresh error:\n");
   2585 		for_each_pipe(dev_priv, pipe)
   2586 			pr_err("pipe %c stat: 0x%08x\n",
   2587 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
   2588 		/* pipestat has already been acked */
   2589 	}
   2590 	if (eir & I915_ERROR_INSTRUCTION) {
   2591 		pr_err("instruction error\n");
   2592 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
   2593 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
   2594 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
   2595 		if (INTEL_INFO(dev)->gen < 4) {
   2596 			u32 ipeir = I915_READ(IPEIR);
   2597 
   2598 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
   2599 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
   2600 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
   2601 			I915_WRITE(IPEIR, ipeir);
   2602 			POSTING_READ(IPEIR);
   2603 		} else {
   2604 			u32 ipeir = I915_READ(IPEIR_I965);
   2605 
   2606 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
   2607 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
   2608 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
   2609 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
   2610 			I915_WRITE(IPEIR_I965, ipeir);
   2611 			POSTING_READ(IPEIR_I965);
   2612 		}
   2613 	}
   2614 
   2615 	I915_WRITE(EIR, eir);
   2616 	POSTING_READ(EIR);
   2617 	eir = I915_READ(EIR);
   2618 	if (eir) {
   2619 		/*
   2620 		 * some errors might have become stuck,
   2621 		 * mask them.
   2622 		 */
   2623 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
   2624 		I915_WRITE(EMR, I915_READ(EMR) | eir);
   2625 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
   2626 	}
   2627 }
   2628 
   2629 /**
   2630  * i915_handle_error - handle a gpu error
    2631  * @dev: drm device
          * @wedged: true if the GPU is hung and a reset should be attempted
          * @fmt: printf-style format string describing the cause of the error
   2632  *
   2633  * Do some basic checking of register state at error time and
   2634  * dump it to the syslog.  Also call i915_capture_error_state() to make
   2635  * sure we get a record and make it available in debugfs.  Fire a uevent
   2636  * so userspace knows something bad happened (should trigger collection
   2637  * of a ring dump etc.).
   2638  */
   2639 void i915_handle_error(struct drm_device *dev, bool wedged,
   2640 		       const char *fmt, ...)
   2641 {
   2642 	struct drm_i915_private *dev_priv = dev->dev_private;
   2643 	va_list args;
   2644 	char error_msg[80];
   2645 
   2646 	va_start(args, fmt);
   2647 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
   2648 	va_end(args);
   2649 
   2650 	i915_capture_error_state(dev, wedged, error_msg);
   2651 	i915_report_and_clear_eir(dev);
   2652 
   2653 	if (wedged) {
   2654 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
   2655 				&dev_priv->gpu_error.reset_counter);
   2656 
   2657 		/*
   2658 		 * Wakeup waiting processes so that the reset function
   2659 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
   2660 		 * various locks. By bumping the reset counter first, the woken
   2661 		 * processes will see a reset in progress and back off,
   2662 		 * releasing their locks and then wait for the reset completion.
   2663 		 * We must do this for _all_ gpu waiters that might hold locks
   2664 		 * that the reset work needs to acquire.
   2665 		 *
   2666 		 * Note: The wake_up serves as the required memory barrier to
   2667 		 * ensure that the waiters see the updated value of the reset
   2668 		 * counter atomic_t.
   2669 		 */
   2670 		i915_error_wake_up(dev_priv, false);
   2671 	}
   2672 
   2673 	i915_reset_and_wakeup(dev);
   2674 
   2675     do {
   2676 	struct i915_error_state_file_priv error_priv;
   2677 	struct drm_i915_error_state_buf error_str;
   2678 	int ret;
   2679 
   2680 	memset(&error_priv, 0, sizeof(error_priv));
   2681 
   2682 	ret = i915_error_state_buf_init(&error_str, dev_priv, 512*1024, 0);
   2683 	if (ret) {
   2684 		DRM_ERROR("Failed to initialize error buf: %d\n", ret);
   2685 		break;
   2686 	}
   2687 	error_priv.dev = dev;
   2688 	i915_error_state_get(dev, &error_priv);
   2689 
   2690 	ret = i915_error_state_to_str(&error_str, &error_priv);
   2691 	if (ret) {
   2692 		DRM_ERROR("Failed to format error buf: %d\n", ret);
    2693 		i915_error_state_put(&error_priv);
         		i915_error_state_buf_release(&error_str);
         		break;
    2694 	}
   2695 
   2696 	error_str.buf[MIN(error_str.size - 1, error_str.bytes)] = '\0';
   2697 	DRM_ERROR("Error state:\n%s\n", error_str.buf);
   2698 
   2699 	i915_error_state_buf_release(&error_str);
   2700 	i915_error_state_put(&error_priv);
   2701     } while (0);
   2702 }
   2703 
   2704 /* Called from drm generic code, passed 'crtc' which
   2705  * we use as a pipe index
   2706  */
   2707 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
   2708 {
   2709 	struct drm_i915_private *dev_priv = dev->dev_private;
   2710 	unsigned long irqflags;
   2711 
   2712 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2713 	if (INTEL_INFO(dev)->gen >= 4)
   2714 		i915_enable_pipestat(dev_priv, pipe,
   2715 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
   2716 	else
   2717 		i915_enable_pipestat(dev_priv, pipe,
   2718 				     PIPE_VBLANK_INTERRUPT_STATUS);
   2719 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2720 
   2721 	return 0;
   2722 }
   2723 
   2724 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
   2725 {
   2726 	struct drm_i915_private *dev_priv = dev->dev_private;
   2727 	unsigned long irqflags;
   2728 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
   2729 						     DE_PIPE_VBLANK(pipe);
   2730 
   2731 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2732 	ironlake_enable_display_irq(dev_priv, bit);
   2733 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2734 
   2735 	return 0;
   2736 }
   2737 
   2738 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
   2739 {
   2740 	struct drm_i915_private *dev_priv = dev->dev_private;
   2741 	unsigned long irqflags;
   2742 
   2743 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2744 	i915_enable_pipestat(dev_priv, pipe,
   2745 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
   2746 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2747 
   2748 	return 0;
   2749 }
   2750 
   2751 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
   2752 {
   2753 	struct drm_i915_private *dev_priv = dev->dev_private;
   2754 	unsigned long irqflags;
   2755 
   2756 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2757 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
   2758 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
   2759 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
   2760 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2761 	return 0;
   2762 }
   2763 
   2764 /* Called from drm generic code, passed 'crtc' which
   2765  * we use as a pipe index
   2766  */
   2767 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
   2768 {
   2769 	struct drm_i915_private *dev_priv = dev->dev_private;
   2770 	unsigned long irqflags;
   2771 
   2772 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2773 	i915_disable_pipestat(dev_priv, pipe,
   2774 			      PIPE_VBLANK_INTERRUPT_STATUS |
   2775 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
   2776 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2777 }
   2778 
   2779 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
   2780 {
   2781 	struct drm_i915_private *dev_priv = dev->dev_private;
   2782 	unsigned long irqflags;
   2783 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
   2784 						     DE_PIPE_VBLANK(pipe);
   2785 
   2786 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2787 	ironlake_disable_display_irq(dev_priv, bit);
   2788 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2789 }
   2790 
   2791 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
   2792 {
   2793 	struct drm_i915_private *dev_priv = dev->dev_private;
   2794 	unsigned long irqflags;
   2795 
   2796 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2797 	i915_disable_pipestat(dev_priv, pipe,
   2798 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
   2799 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2800 }
   2801 
   2802 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
   2803 {
   2804 	struct drm_i915_private *dev_priv = dev->dev_private;
   2805 	unsigned long irqflags;
   2806 
   2807 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2808 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
   2809 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
   2810 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
   2811 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2812 }
   2813 
   2814 static bool
   2815 ring_idle(struct intel_engine_cs *ring, u32 seqno)
   2816 {
   2817 	return (list_empty(&ring->request_list) ||
   2818 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
   2819 }
   2820 
   2821 static bool
   2822 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
   2823 {
   2824 	if (INTEL_INFO(dev)->gen >= 8) {
   2825 		return (ipehr >> 23) == 0x1c;
   2826 	} else {
   2827 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
   2828 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
   2829 				 MI_SEMAPHORE_REGISTER);
   2830 	}
   2831 }
   2832 
   2833 static struct intel_engine_cs *
   2834 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
   2835 {
   2836 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
   2837 	struct intel_engine_cs *signaller;
   2838 	int i;
   2839 
   2840 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
   2841 		for_each_ring(signaller, dev_priv, i) {
   2842 			if (ring == signaller)
   2843 				continue;
   2844 
   2845 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
   2846 				return signaller;
   2847 		}
   2848 	} else {
   2849 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
   2850 
   2851 		for_each_ring(signaller, dev_priv, i) {
    2852 			if (ring == signaller)
   2853 				continue;
   2854 
   2855 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
   2856 				return signaller;
   2857 		}
   2858 	}
   2859 
   2860 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016"PRIx64"\n",
   2861 		  ring->id, ipehr, offset);
   2862 
   2863 	return NULL;
   2864 }
   2865 
   2866 static struct intel_engine_cs *
   2867 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
   2868 {
   2869 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
   2870 	u32 cmd, ipehr, head;
   2871 	u64 offset = 0;
   2872 	int i, backwards;
   2873 
   2874 	/*
   2875 	 * This function does not support execlist mode - any attempt to
   2876 	 * proceed further into this function will result in a kernel panic
   2877 	 * when dereferencing ring->buffer, which is not set up in execlist
   2878 	 * mode.
   2879 	 *
   2880 	 * The correct way of doing it would be to derive the currently
   2881 	 * executing ring buffer from the current context, which is derived
   2882 	 * from the currently running request. Unfortunately, to get the
   2883 	 * current request we would have to grab the struct_mutex before doing
   2884 	 * anything else, which would be ill-advised since some other thread
   2885 	 * might have grabbed it already and managed to hang itself, causing
   2886 	 * the hang checker to deadlock.
   2887 	 *
   2888 	 * Therefore, this function does not support execlist mode in its
   2889 	 * current form. Just return NULL and move on.
   2890 	 */
   2891 	if (ring->buffer == NULL)
   2892 		return NULL;
   2893 
   2894 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
   2895 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
   2896 		return NULL;
   2897 
   2898 	/*
   2899 	 * HEAD is likely pointing to the dword after the actual command,
   2900 	 * so scan backwards until we find the MBOX. But limit it to just 3
   2901 	 * or 4 dwords depending on the semaphore wait command size.
   2902 	 * Note that we don't care about ACTHD here since that might
    2903 	 * point at a batch, and semaphores are always emitted into the
   2904 	 * ringbuffer itself.
   2905 	 */
   2906 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
   2907 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
   2908 
   2909 	for (i = backwards; i; --i) {
   2910 		/*
   2911 		 * Be paranoid and presume the hw has gone off into the wild -
   2912 		 * our ring is smaller than what the hardware (and hence
   2913 		 * HEAD_ADDR) allows. Also handles wrap-around.
   2914 		 */
   2915 		head &= ring->buffer->size - 1;
   2916 
   2917 		/* This here seems to blow up */
   2918 #ifdef __NetBSD__
   2919 		cmd = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
   2920 		    head);
   2921 #else
   2922 		cmd = ioread32(ring->buffer->virtual_start + head);
   2923 #endif
   2924 		if (cmd == ipehr)
   2925 			break;
   2926 
   2927 		head -= 4;
   2928 	}
   2929 
   2930 	if (!i)
   2931 		return NULL;
   2932 
   2933 #ifdef __NetBSD__
   2934 	*seqno = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
   2935 	    head + 4) + 1;
   2936 #else
   2937 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
   2938 #endif
   2939 	if (INTEL_INFO(ring->dev)->gen >= 8) {
   2940 #ifdef __NetBSD__
   2941 		offset = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
   2942 		    head + 12);
   2943 		offset <<= 32;
   2944 		offset |= bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
   2945 		    head + 8);
   2946 #else
   2947 		offset = ioread32(ring->buffer->virtual_start + head + 12);
   2948 		offset <<= 32;
    2949 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
   2950 #endif
   2951 	}
   2952 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
   2953 }
   2954 
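         /*
          * Returns 1 if the ring being waited on has already passed the
          * awaited seqno, 0 if it still appears to be making progress, and
          * -1 if no signaller was found or the wait looks like a deadlock.
          */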
   2955 static int semaphore_passed(struct intel_engine_cs *ring)
   2956 {
   2957 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
   2958 	struct intel_engine_cs *signaller;
   2959 	u32 seqno;
   2960 
   2961 	ring->hangcheck.deadlock++;
   2962 
   2963 	signaller = semaphore_waits_for(ring, &seqno);
   2964 	if (signaller == NULL)
   2965 		return -1;
   2966 
   2967 	/* Prevent pathological recursion due to driver bugs */
   2968 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
   2969 		return -1;
   2970 
   2971 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
   2972 		return 1;
   2973 
   2974 	/* cursory check for an unkickable deadlock */
   2975 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
   2976 	    semaphore_passed(signaller) < 0)
   2977 		return -1;
   2978 
   2979 	return 0;
   2980 }
   2981 
   2982 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
   2983 {
   2984 	struct intel_engine_cs *ring;
   2985 	int i;
   2986 
   2987 	for_each_ring(ring, dev_priv, i)
   2988 		ring->hangcheck.deadlock = 0;
   2989 }
   2990 
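         /*
          * Classify a ring whose seqno has not advanced: still active (ACTHD
          * moving), waiting on an event or semaphore that can be kicked, or
          * hung.
          */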
   2991 static enum intel_ring_hangcheck_action
   2992 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
   2993 {
   2994 	struct drm_device *dev = ring->dev;
   2995 	struct drm_i915_private *dev_priv = dev->dev_private;
   2996 	u32 tmp;
   2997 
   2998 	if (acthd != ring->hangcheck.acthd) {
   2999 		if (acthd > ring->hangcheck.max_acthd) {
   3000 			ring->hangcheck.max_acthd = acthd;
   3001 			return HANGCHECK_ACTIVE;
   3002 		}
   3003 
   3004 		return HANGCHECK_ACTIVE_LOOP;
   3005 	}
   3006 
   3007 	if (IS_GEN2(dev))
   3008 		return HANGCHECK_HUNG;
   3009 
   3010 	/* Is the chip hanging on a WAIT_FOR_EVENT?
   3011 	 * If so we can simply poke the RB_WAIT bit
   3012 	 * and break the hang. This should work on
   3013 	 * all but the second generation chipsets.
   3014 	 */
   3015 	tmp = I915_READ_CTL(ring);
   3016 	if (tmp & RING_WAIT) {
   3017 		i915_handle_error(dev, false,
   3018 				  "Kicking stuck wait on %s",
   3019 				  ring->name);
   3020 		I915_WRITE_CTL(ring, tmp);
   3021 		return HANGCHECK_KICK;
   3022 	}
   3023 
   3024 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
   3025 		switch (semaphore_passed(ring)) {
   3026 		default:
   3027 			return HANGCHECK_HUNG;
   3028 		case 1:
   3029 			i915_handle_error(dev, false,
   3030 					  "Kicking stuck semaphore on %s",
   3031 					  ring->name);
   3032 			I915_WRITE_CTL(ring, tmp);
   3033 			return HANGCHECK_KICK;
   3034 		case 0:
   3035 			return HANGCHECK_WAIT;
   3036 		}
   3037 	}
   3038 
   3039 	return HANGCHECK_HUNG;
   3040 }
   3041 
   3042 /*
   3043  * This is called when the chip hasn't reported back with completed
    3044  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
    3045  * if there is no progress, the hangcheck score for that ring is increased.
    3046  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
    3047  * we kick the ring. If we see no progress on three subsequent calls
   3048  * we assume chip is wedged and try to fix it by resetting the chip.
   3049  */
   3050 static void i915_hangcheck_elapsed(struct work_struct *work)
   3051 {
   3052 	struct drm_i915_private *dev_priv =
   3053 		container_of(work, typeof(*dev_priv),
   3054 			     gpu_error.hangcheck_work.work);
   3055 	struct drm_device *dev = dev_priv->dev;
   3056 	struct intel_engine_cs *ring;
   3057 	int i;
   3058 	int busy_count = 0, rings_hung = 0;
   3059 	bool stuck[I915_NUM_RINGS] = { 0 };
   3060 #define BUSY 1
   3061 #define KICK 5
   3062 #define HUNG 20
   3063 
   3064 	if (!i915.enable_hangcheck)
   3065 		return;
   3066 
   3067 	for_each_ring(ring, dev_priv, i) {
   3068 		u64 acthd;
   3069 		u32 seqno;
   3070 		bool busy = true;
   3071 
   3072 		semaphore_clear_deadlocks(dev_priv);
   3073 
   3074 		seqno = ring->get_seqno(ring, false);
   3075 		acthd = intel_ring_get_active_head(ring);
   3076 
   3077 		if (ring->hangcheck.seqno == seqno) {
   3078 			if (ring_idle(ring, seqno)) {
   3079 				ring->hangcheck.action = HANGCHECK_IDLE;
   3080 #ifdef __NetBSD__
   3081 				spin_lock(&dev_priv->irq_lock);
   3082 				if (DRM_SPIN_WAITERS_P(&ring->irq_queue,
   3083 					&dev_priv->irq_lock)) {
   3084 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
   3085 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
   3086 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
   3087 								  ring->name);
   3088 						else
   3089 							DRM_INFO("Fake missed irq on %s\n",
   3090 								 ring->name);
   3091 						DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
   3092 					}
   3093 					ring->hangcheck.score += BUSY;
   3094 				} else {
   3095 					busy = false;
   3096 				}
   3097 				spin_unlock(&dev_priv->irq_lock);
   3098 #else
   3099 				if (waitqueue_active(&ring->irq_queue)) {
   3100 					/* Issue a wake-up to catch stuck h/w. */
   3101 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
   3102 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
   3103 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
   3104 								  ring->name);
   3105 						else
   3106 							DRM_INFO("Fake missed irq on %s\n",
   3107 								 ring->name);
   3108 						wake_up_all(&ring->irq_queue);
   3109 					}
   3110 					/* Safeguard against driver failure */
   3111 					ring->hangcheck.score += BUSY;
   3112 				} else
   3113 					busy = false;
   3114 #endif
   3115 			} else {
   3116 				/* We always increment the hangcheck score
   3117 				 * if the ring is busy and still processing
   3118 				 * the same request, so that no single request
   3119 				 * can run indefinitely (such as a chain of
   3120 				 * batches). The only time we do not increment
    3121  * the hangcheck score on this ring is if this
   3122 				 * ring is in a legitimate wait for another
   3123 				 * ring. In that case the waiting ring is a
   3124 				 * victim and we want to be sure we catch the
   3125 				 * right culprit. Then every time we do kick
   3126 				 * the ring, add a small increment to the
   3127 				 * score so that we can catch a batch that is
   3128 				 * being repeatedly kicked and so responsible
   3129 				 * for stalling the machine.
   3130 				 */
   3131 				ring->hangcheck.action = ring_stuck(ring,
   3132 								    acthd);
   3133 
   3134 				switch (ring->hangcheck.action) {
   3135 				case HANGCHECK_IDLE:
   3136 				case HANGCHECK_WAIT:
   3137 				case HANGCHECK_ACTIVE:
   3138 					break;
   3139 				case HANGCHECK_ACTIVE_LOOP:
   3140 					ring->hangcheck.score += BUSY;
   3141 					break;
   3142 				case HANGCHECK_KICK:
   3143 					ring->hangcheck.score += KICK;
   3144 					break;
   3145 				case HANGCHECK_HUNG:
   3146 					ring->hangcheck.score += HUNG;
   3147 					stuck[i] = true;
   3148 					break;
   3149 				}
   3150 			}
   3151 		} else {
   3152 			ring->hangcheck.action = HANGCHECK_ACTIVE;
   3153 
   3154 			/* Gradually reduce the count so that we catch DoS
   3155 			 * attempts across multiple batches.
   3156 			 */
   3157 			if (ring->hangcheck.score > 0)
   3158 				ring->hangcheck.score--;
   3159 
   3160 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
   3161 		}
   3162 
   3163 		ring->hangcheck.seqno = seqno;
   3164 		ring->hangcheck.acthd = acthd;
   3165 		busy_count += busy;
   3166 	}
   3167 
   3168 	for_each_ring(ring, dev_priv, i) {
   3169 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
   3170 			DRM_INFO("%s on %s\n",
   3171 				 stuck[i] ? "stuck" : "no progress",
   3172 				 ring->name);
   3173 			rings_hung++;
   3174 		}
   3175 	}
   3176 
   3177 	if (rings_hung) {
   3178 		i915_handle_error(dev, true, "Ring hung");
   3179 		return;
   3180 	}
   3181 
   3182 	if (busy_count)
    3183 		/* Reset timer in case the chip hangs without another
    3184 		 * request being added */
   3185 		i915_queue_hangcheck(dev);
   3186 }
   3187 
   3188 void i915_queue_hangcheck(struct drm_device *dev)
   3189 {
   3190 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
   3191 
   3192 	if (!i915.enable_hangcheck)
   3193 		return;
   3194 
   3195 	/* Don't continually defer the hangcheck so that it is always run at
   3196 	 * least once after work has been scheduled on any ring. Otherwise,
   3197 	 * we will ignore a hung ring if a second ring is kept busy.
   3198 	 */
   3199 
   3200 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
   3201 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
   3202 }
   3203 
   3204 static void ibx_irq_reset(struct drm_device *dev)
   3205 {
   3206 	struct drm_i915_private *dev_priv = dev->dev_private;
   3207 
   3208 	if (HAS_PCH_NOP(dev))
   3209 		return;
   3210 
   3211 	GEN5_IRQ_RESET(SDE);
   3212 
   3213 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
   3214 		I915_WRITE(SERR_INT, 0xffffffff);
   3215 }
   3216 
   3217 /*
   3218  * SDEIER is also touched by the interrupt handler to work around missed PCH
   3219  * interrupts. Hence we can't update it after the interrupt handler is enabled -
   3220  * instead we unconditionally enable all PCH interrupt sources here, but then
   3221  * only unmask them as needed with SDEIMR.
   3222  *
   3223  * This function needs to be called before interrupts are enabled.
   3224  */
   3225 static void ibx_irq_pre_postinstall(struct drm_device *dev)
   3226 {
   3227 	struct drm_i915_private *dev_priv = dev->dev_private;
   3228 
   3229 	if (HAS_PCH_NOP(dev))
   3230 		return;
   3231 
   3232 	WARN_ON(I915_READ(SDEIER) != 0);
   3233 	I915_WRITE(SDEIER, 0xffffffff);
   3234 	POSTING_READ(SDEIER);
   3235 }
   3236 
   3237 static void gen5_gt_irq_reset(struct drm_device *dev)
   3238 {
   3239 	struct drm_i915_private *dev_priv = dev->dev_private;
   3240 
   3241 	GEN5_IRQ_RESET(GT);
   3242 	if (INTEL_INFO(dev)->gen >= 6)
   3243 		GEN5_IRQ_RESET(GEN6_PM);
   3244 }
   3245 
    3246 /* drm_dma.h hooks
    3247  */
   3248 static void ironlake_irq_reset(struct drm_device *dev)
   3249 {
   3250 	struct drm_i915_private *dev_priv = dev->dev_private;
   3251 
   3252 	I915_WRITE(HWSTAM, 0xffffffff);
   3253 
   3254 	GEN5_IRQ_RESET(DE);
   3255 	if (IS_GEN7(dev))
   3256 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
   3257 
   3258 	gen5_gt_irq_reset(dev);
   3259 
   3260 	ibx_irq_reset(dev);
   3261 }
   3262 
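/*
 * Quiesce VLV/CHV display interrupts: disable the hotplug enables, clear any
 * latched hotplug and per-pipe status bits, and reset the VLV_ IMR/IER/IIR
 * registers via GEN5_IRQ_RESET.
 */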
   3263 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
   3264 {
   3265 	enum pipe pipe;
   3266 
   3267 	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
   3268 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   3269 
   3270 	for_each_pipe(dev_priv, pipe)
   3271 		I915_WRITE(PIPESTAT(pipe), 0xffff);
   3272 
   3273 	GEN5_IRQ_RESET(VLV_);
   3274 }
   3275 
   3276 static void valleyview_irq_preinstall(struct drm_device *dev)
   3277 {
   3278 	struct drm_i915_private *dev_priv = dev->dev_private;
   3279 
   3280 	/* VLV magic */
   3281 	I915_WRITE(VLV_IMR, 0);
   3282 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
   3283 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
   3284 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
   3285 
   3286 	gen5_gt_irq_reset(dev);
   3287 
   3288 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
   3289 
   3290 	vlv_display_irq_reset(dev_priv);
   3291 }
   3292 
   3293 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
   3294 {
   3295 	GEN8_IRQ_RESET_NDX(GT, 0);
   3296 	GEN8_IRQ_RESET_NDX(GT, 1);
   3297 	GEN8_IRQ_RESET_NDX(GT, 2);
   3298 	GEN8_IRQ_RESET_NDX(GT, 3);
   3299 }
   3300 
   3301 static void gen8_irq_reset(struct drm_device *dev)
   3302 {
   3303 	struct drm_i915_private *dev_priv = dev->dev_private;
   3304 	int pipe;
   3305 
   3306 	I915_WRITE(GEN8_MASTER_IRQ, 0);
   3307 	POSTING_READ(GEN8_MASTER_IRQ);
   3308 
   3309 	gen8_gt_irq_reset(dev_priv);
   3310 
   3311 	for_each_pipe(dev_priv, pipe)
   3312 		if (intel_display_power_is_enabled(dev_priv,
   3313 						   POWER_DOMAIN_PIPE(pipe)))
   3314 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
   3315 
   3316 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
   3317 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
   3318 	GEN5_IRQ_RESET(GEN8_PCU_);
   3319 
   3320 	if (HAS_PCH_SPLIT(dev))
   3321 		ibx_irq_reset(dev);
   3322 }
   3323 
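/*
 * Called after a display power well is re-enabled: reprogram the DE pipe
 * interrupt registers for the pipes in @pipe_mask, whose contents are lost
 * while the power well is down.
 */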
   3324 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
   3325 				     unsigned int pipe_mask)
   3326 {
   3327 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
   3328 
   3329 	spin_lock_irq(&dev_priv->irq_lock);
   3330 	if (pipe_mask & 1 << PIPE_A)
   3331 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
   3332 				  dev_priv->de_irq_mask[PIPE_A],
   3333 				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
   3334 	if (pipe_mask & 1 << PIPE_B)
   3335 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
   3336 				  dev_priv->de_irq_mask[PIPE_B],
   3337 				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
   3338 	if (pipe_mask & 1 << PIPE_C)
   3339 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
   3340 				  dev_priv->de_irq_mask[PIPE_C],
   3341 				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
   3342 	spin_unlock_irq(&dev_priv->irq_lock);
   3343 }
   3344 
   3345 static void cherryview_irq_preinstall(struct drm_device *dev)
   3346 {
   3347 	struct drm_i915_private *dev_priv = dev->dev_private;
   3348 
   3349 	I915_WRITE(GEN8_MASTER_IRQ, 0);
   3350 	POSTING_READ(GEN8_MASTER_IRQ);
   3351 
   3352 	gen8_gt_irq_reset(dev_priv);
   3353 
   3354 	GEN5_IRQ_RESET(GEN8_PCU_);
   3355 
   3356 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
   3357 
   3358 	vlv_display_irq_reset(dev_priv);
   3359 }
   3360 
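/*
 * Build the bitmask of hotplug interrupt bits to unmask by looking up each
 * encoder whose HPD pin is currently enabled in the platform-specific
 * pin-to-bit table passed in as @hpd.
 */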
   3361 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
   3362 				  const u32 hpd[HPD_NUM_PINS])
   3363 {
   3364 	struct drm_i915_private *dev_priv = to_i915(dev);
   3365 	struct intel_encoder *encoder;
   3366 	u32 enabled_irqs = 0;
   3367 
   3368 	for_each_intel_encoder(dev, encoder)
   3369 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
   3370 			enabled_irqs |= hpd[encoder->hpd_pin];
   3371 
   3372 	return enabled_irqs;
   3373 }
   3374 
   3375 static void ibx_hpd_irq_setup(struct drm_device *dev)
   3376 {
   3377 	struct drm_i915_private *dev_priv = dev->dev_private;
   3378 	u32 hotplug_irqs, hotplug, enabled_irqs;
   3379 
   3380 	if (HAS_PCH_IBX(dev)) {
   3381 		hotplug_irqs = SDE_HOTPLUG_MASK;
   3382 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
   3383 	} else {
   3384 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
   3385 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
   3386 	}
   3387 
   3388 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
   3389 
   3390 	/*
   3391 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
   3392 	 * duration to 2ms (which is the minimum in the Display Port spec).
   3393 	 * The pulse duration bits are reserved on LPT+.
   3394 	 */
   3395 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
   3396 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
   3397 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
   3398 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
   3399 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
   3400 	/*
   3401 	 * When CPU and PCH are on the same package, port A
   3402 	 * HPD must be enabled in both north and south.
   3403 	 */
   3404 	if (HAS_PCH_LPT_LP(dev))
   3405 		hotplug |= PORTA_HOTPLUG_ENABLE;
   3406 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
   3407 }
   3408 
   3409 static void spt_hpd_irq_setup(struct drm_device *dev)
   3410 {
   3411 	struct drm_i915_private *dev_priv = dev->dev_private;
   3412 	u32 hotplug_irqs, hotplug, enabled_irqs;
   3413 
   3414 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
   3415 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
   3416 
   3417 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
   3418 
   3419 	/* Enable digital hotplug on the PCH */
   3420 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
   3421 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
   3422 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
   3423 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
   3424 
   3425 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
   3426 	hotplug |= PORTE_HOTPLUG_ENABLE;
   3427 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
   3428 }
   3429 
   3430 static void ilk_hpd_irq_setup(struct drm_device *dev)
   3431 {
   3432 	struct drm_i915_private *dev_priv = dev->dev_private;
   3433 	u32 hotplug_irqs, hotplug, enabled_irqs;
   3434 
   3435 	if (INTEL_INFO(dev)->gen >= 8) {
   3436 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
   3437 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
   3438 
   3439 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3440 	} else if (INTEL_INFO(dev)->gen >= 7) {
   3441 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
   3442 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
   3443 
   3444 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3445 	} else {
   3446 		hotplug_irqs = DE_DP_A_HOTPLUG;
   3447 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
   3448 
   3449 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3450 	}
   3451 
   3452 	/*
   3453 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
   3454 	 * duration to 2ms (which is the minimum in the Display Port spec)
   3455 	 * The pulse duration bits are reserved on HSW+.
   3456 	 */
   3457 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
   3458 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
   3459 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
   3460 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
   3461 
   3462 	ibx_hpd_irq_setup(dev);
   3463 }
   3464 
   3465 static void bxt_hpd_irq_setup(struct drm_device *dev)
   3466 {
   3467 	struct drm_i915_private *dev_priv = dev->dev_private;
   3468 	u32 hotplug_irqs, hotplug, enabled_irqs;
   3469 
   3470 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
   3471 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
   3472 
   3473 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3474 
   3475 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
   3476 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
   3477 		PORTA_HOTPLUG_ENABLE;
   3478 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
   3479 }
   3480 
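/*
 * Unmask the south display (PCH) interrupts we always want: GMBUS and the
 * AUX channels, plus the poison notification on IBX.
 */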
   3481 static void ibx_irq_postinstall(struct drm_device *dev)
   3482 {
   3483 	struct drm_i915_private *dev_priv = dev->dev_private;
   3484 	u32 mask;
   3485 
   3486 	if (HAS_PCH_NOP(dev))
   3487 		return;
   3488 
   3489 	if (HAS_PCH_IBX(dev))
   3490 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
   3491 	else
   3492 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
   3493 
   3494 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
   3495 	I915_WRITE(SDEIMR, ~mask);
   3496 }
   3497 
   3498 static void gen5_gt_irq_postinstall(struct drm_device *dev)
   3499 {
   3500 	struct drm_i915_private *dev_priv = dev->dev_private;
   3501 	u32 pm_irqs, gt_irqs;
   3502 
   3503 	pm_irqs = gt_irqs = 0;
   3504 
   3505 	dev_priv->gt_irq_mask = ~0;
   3506 	if (HAS_L3_DPF(dev)) {
   3507 		/* L3 parity interrupt is always unmasked. */
   3508 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
   3509 		gt_irqs |= GT_PARITY_ERROR(dev);
   3510 	}
   3511 
   3512 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
   3513 	if (IS_GEN5(dev)) {
   3514 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
   3515 			   ILK_BSD_USER_INTERRUPT;
   3516 	} else {
   3517 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
   3518 	}
   3519 
   3520 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
   3521 
   3522 	if (INTEL_INFO(dev)->gen >= 6) {
   3523 		/*
   3524 		 * RPS interrupts will get enabled/disabled on demand when RPS
   3525 		 * itself is enabled/disabled.
   3526 		 */
   3527 		if (HAS_VEBOX(dev))
   3528 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
   3529 
   3530 		dev_priv->pm_irq_mask = 0xffffffff;
   3531 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
   3532 	}
   3533 }
   3534 
   3535 static int ironlake_irq_postinstall(struct drm_device *dev)
   3536 {
   3537 	struct drm_i915_private *dev_priv = dev->dev_private;
   3538 	u32 display_mask, extra_mask;
   3539 
   3540 	if (INTEL_INFO(dev)->gen >= 7) {
   3541 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
   3542 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
   3543 				DE_PLANEB_FLIP_DONE_IVB |
   3544 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
   3545 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
   3546 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
   3547 			      DE_DP_A_HOTPLUG_IVB);
   3548 	} else {
   3549 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
   3550 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
   3551 				DE_AUX_CHANNEL_A |
   3552 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
   3553 				DE_POISON);
   3554 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
   3555 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
   3556 			      DE_DP_A_HOTPLUG);
   3557 	}
   3558 
   3559 	dev_priv->irq_mask = ~display_mask;
   3560 
   3561 	I915_WRITE(HWSTAM, 0xeffe);
   3562 
   3563 	ibx_irq_pre_postinstall(dev);
   3564 
   3565 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
   3566 
   3567 	gen5_gt_irq_postinstall(dev);
   3568 
   3569 	ibx_irq_postinstall(dev);
   3570 
   3571 	if (IS_IRONLAKE_M(dev)) {
   3572 		/* Enable PCU event interrupts
   3573 		 *
    3574 		 * spinlocking is not required here for correctness since interrupt
   3575 		 * setup is guaranteed to run in single-threaded context. But we
   3576 		 * need it to make the assert_spin_locked happy. */
   3577 		spin_lock_irq(&dev_priv->irq_lock);
   3578 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
   3579 		spin_unlock_irq(&dev_priv->irq_lock);
   3580 	}
   3581 
   3582 	return 0;
   3583 }
   3584 
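/*
 * Enable the VLV/CHV display interrupt sources: clear any stale pipe status,
 * then enable GMBUS (pipe A), flip-done and CRC-done pipestat events on every
 * pipe, and unmask the pipe event / display port bits in VLV_IMR/VLV_IER.
 */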
   3585 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
   3586 {
   3587 	u32 pipestat_mask;
   3588 	u32 iir_mask;
   3589 	enum pipe pipe;
   3590 
   3591 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
   3592 			PIPE_FIFO_UNDERRUN_STATUS;
   3593 
   3594 	for_each_pipe(dev_priv, pipe)
   3595 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
   3596 	POSTING_READ(PIPESTAT(PIPE_A));
   3597 
   3598 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
   3599 			PIPE_CRC_DONE_INTERRUPT_STATUS;
   3600 
   3601 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
   3602 	for_each_pipe(dev_priv, pipe)
   3603 		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
   3604 
   3605 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
   3606 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3607 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
   3608 	if (IS_CHERRYVIEW(dev_priv))
   3609 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
   3610 	dev_priv->irq_mask &= ~iir_mask;
   3611 
   3612 	I915_WRITE(VLV_IIR, iir_mask);
   3613 	I915_WRITE(VLV_IIR, iir_mask);
   3614 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
   3615 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
   3616 	POSTING_READ(VLV_IMR);
   3617 }
   3618 
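/*
 * Mirror of valleyview_display_irqs_install(): mask the pipe event / display
 * port bits again, disable the pipestat events and clear any latched status.
 */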
   3619 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
   3620 {
   3621 	u32 pipestat_mask;
   3622 	u32 iir_mask;
   3623 	enum pipe pipe;
   3624 
   3625 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
   3626 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3627 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
   3628 	if (IS_CHERRYVIEW(dev_priv))
   3629 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
   3630 
   3631 	dev_priv->irq_mask |= iir_mask;
   3632 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
   3633 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
   3634 	I915_WRITE(VLV_IIR, iir_mask);
   3635 	I915_WRITE(VLV_IIR, iir_mask);
   3636 	POSTING_READ(VLV_IIR);
   3637 
   3638 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
   3639 			PIPE_CRC_DONE_INTERRUPT_STATUS;
   3640 
   3641 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
   3642 	for_each_pipe(dev_priv, pipe)
   3643 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
   3644 
   3645 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
   3646 			PIPE_FIFO_UNDERRUN_STATUS;
   3647 
   3648 	for_each_pipe(dev_priv, pipe)
   3649 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
   3650 	POSTING_READ(PIPESTAT(PIPE_A));
   3651 }
   3652 
   3653 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
   3654 {
   3655 	assert_spin_locked(&dev_priv->irq_lock);
   3656 
   3657 	if (dev_priv->display_irqs_enabled)
   3658 		return;
   3659 
   3660 	dev_priv->display_irqs_enabled = true;
   3661 
   3662 	if (intel_irqs_enabled(dev_priv))
   3663 		valleyview_display_irqs_install(dev_priv);
   3664 }
   3665 
   3666 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
   3667 {
   3668 	assert_spin_locked(&dev_priv->irq_lock);
   3669 
   3670 	if (!dev_priv->display_irqs_enabled)
   3671 		return;
   3672 
   3673 	dev_priv->display_irqs_enabled = false;
   3674 
   3675 	if (intel_irqs_enabled(dev_priv))
   3676 		valleyview_display_irqs_uninstall(dev_priv);
   3677 }
   3678 
   3679 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
   3680 {
   3681 	dev_priv->irq_mask = ~0;
   3682 
   3683 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   3684 	POSTING_READ(PORT_HOTPLUG_EN);
   3685 
   3686 	I915_WRITE(VLV_IIR, 0xffffffff);
   3687 	I915_WRITE(VLV_IIR, 0xffffffff);
   3688 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
   3689 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
   3690 	POSTING_READ(VLV_IMR);
   3691 
    3692 	/* Interrupt setup is already guaranteed to be single-threaded; this is
   3693 	 * just to make the assert_spin_locked check happy. */
   3694 	spin_lock_irq(&dev_priv->irq_lock);
   3695 	if (dev_priv->display_irqs_enabled)
   3696 		valleyview_display_irqs_install(dev_priv);
   3697 	spin_unlock_irq(&dev_priv->irq_lock);
   3698 }
   3699 
   3700 static int valleyview_irq_postinstall(struct drm_device *dev)
   3701 {
   3702 	struct drm_i915_private *dev_priv = dev->dev_private;
   3703 
   3704 	vlv_display_irq_postinstall(dev_priv);
   3705 
   3706 	gen5_gt_irq_postinstall(dev);
   3707 
   3708 	/* ack & enable invalid PTE error interrupts */
   3709 #if 0 /* FIXME: add support to irq handler for checking these bits */
   3710 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
   3711 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
   3712 #endif
   3713 
   3714 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
   3715 
   3716 	return 0;
   3717 }
   3718 
   3719 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
   3720 {
   3721 	/* These are interrupts we'll toggle with the ring mask register */
   3722 	uint32_t gt_interrupts[] = {
   3723 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
   3724 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
   3725 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
   3726 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
   3727 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
   3728 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
   3729 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
   3730 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
   3731 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
   3732 		0,
   3733 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
   3734 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
   3735 		};
   3736 
   3737 	dev_priv->pm_irq_mask = 0xffffffff;
   3738 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
   3739 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
   3740 	/*
   3741 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
   3742 	 * is enabled/disabled.
   3743 	 */
   3744 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
   3745 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
   3746 }
   3747 
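/*
 * Program the gen8+ display engine interrupt masks: per-pipe flip-done, CRC
 * and fault bits plus the DE port (AUX channel, hotplug) interrupts, skipping
 * any pipe whose power well is currently off.
 */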
   3748 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
   3749 {
   3750 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
   3751 	uint32_t de_pipe_enables;
   3752 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
   3753 	u32 de_port_enables;
   3754 	enum pipe pipe;
   3755 
   3756 	if (INTEL_INFO(dev_priv)->gen >= 9) {
   3757 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
   3758 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
   3759 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
   3760 				  GEN9_AUX_CHANNEL_D;
   3761 		if (IS_BROXTON(dev_priv))
   3762 			de_port_masked |= BXT_DE_PORT_GMBUS;
   3763 	} else {
   3764 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
   3765 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
   3766 	}
   3767 
   3768 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
   3769 					   GEN8_PIPE_FIFO_UNDERRUN;
   3770 
   3771 	de_port_enables = de_port_masked;
   3772 	if (IS_BROXTON(dev_priv))
   3773 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
   3774 	else if (IS_BROADWELL(dev_priv))
   3775 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
   3776 
   3777 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
   3778 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
   3779 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
   3780 
   3781 	for_each_pipe(dev_priv, pipe)
   3782 		if (intel_display_power_is_enabled(dev_priv,
   3783 				POWER_DOMAIN_PIPE(pipe)))
   3784 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
   3785 					  dev_priv->de_irq_mask[pipe],
   3786 					  de_pipe_enables);
   3787 
   3788 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
   3789 }
   3790 
   3791 static int gen8_irq_postinstall(struct drm_device *dev)
   3792 {
   3793 	struct drm_i915_private *dev_priv = dev->dev_private;
   3794 
   3795 	if (HAS_PCH_SPLIT(dev))
   3796 		ibx_irq_pre_postinstall(dev);
   3797 
   3798 	gen8_gt_irq_postinstall(dev_priv);
   3799 	gen8_de_irq_postinstall(dev_priv);
   3800 
   3801 	if (HAS_PCH_SPLIT(dev))
   3802 		ibx_irq_postinstall(dev);
   3803 
   3804 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
   3805 	POSTING_READ(GEN8_MASTER_IRQ);
   3806 
   3807 	return 0;
   3808 }
   3809 
   3810 static int cherryview_irq_postinstall(struct drm_device *dev)
   3811 {
   3812 	struct drm_i915_private *dev_priv = dev->dev_private;
   3813 
   3814 	vlv_display_irq_postinstall(dev_priv);
   3815 
   3816 	gen8_gt_irq_postinstall(dev_priv);
   3817 
   3818 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
   3819 	POSTING_READ(GEN8_MASTER_IRQ);
   3820 
   3821 	return 0;
   3822 }
   3823 
   3824 static void gen8_irq_uninstall(struct drm_device *dev)
   3825 {
   3826 	struct drm_i915_private *dev_priv = dev->dev_private;
   3827 
   3828 	if (!dev_priv)
   3829 		return;
   3830 
   3831 	gen8_irq_reset(dev);
   3832 }
   3833 
   3834 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
   3835 {
    3836 	/* Interrupt setup is already guaranteed to be single-threaded; this is
   3837 	 * just to make the assert_spin_locked check happy. */
   3838 	spin_lock_irq(&dev_priv->irq_lock);
   3839 	if (dev_priv->display_irqs_enabled)
   3840 		valleyview_display_irqs_uninstall(dev_priv);
   3841 	spin_unlock_irq(&dev_priv->irq_lock);
   3842 
   3843 	vlv_display_irq_reset(dev_priv);
   3844 
   3845 	dev_priv->irq_mask = ~0;
   3846 }
   3847 
   3848 static void valleyview_irq_uninstall(struct drm_device *dev)
   3849 {
   3850 	struct drm_i915_private *dev_priv = dev->dev_private;
   3851 
   3852 	if (!dev_priv)
   3853 		return;
   3854 
   3855 	I915_WRITE(VLV_MASTER_IER, 0);
   3856 
   3857 	gen5_gt_irq_reset(dev);
   3858 
   3859 	I915_WRITE(HWSTAM, 0xffffffff);
   3860 
   3861 	vlv_display_irq_uninstall(dev_priv);
   3862 }
   3863 
   3864 static void cherryview_irq_uninstall(struct drm_device *dev)
   3865 {
   3866 	struct drm_i915_private *dev_priv = dev->dev_private;
   3867 
   3868 	if (!dev_priv)
   3869 		return;
   3870 
   3871 	I915_WRITE(GEN8_MASTER_IRQ, 0);
   3872 	POSTING_READ(GEN8_MASTER_IRQ);
   3873 
   3874 	gen8_gt_irq_reset(dev_priv);
   3875 
   3876 	GEN5_IRQ_RESET(GEN8_PCU_);
   3877 
   3878 	vlv_display_irq_uninstall(dev_priv);
   3879 }
   3880 
   3881 static void ironlake_irq_uninstall(struct drm_device *dev)
   3882 {
   3883 	struct drm_i915_private *dev_priv = dev->dev_private;
   3884 
   3885 	if (!dev_priv)
   3886 		return;
   3887 
   3888 	ironlake_irq_reset(dev);
   3889 }
   3890 
   3891 static void i8xx_irq_preinstall(struct drm_device * dev)
   3892 {
   3893 	struct drm_i915_private *dev_priv = dev->dev_private;
   3894 	int pipe;
   3895 
   3896 	for_each_pipe(dev_priv, pipe)
   3897 		I915_WRITE(PIPESTAT(pipe), 0);
   3898 	I915_WRITE16(IMR, 0xffff);
   3899 	I915_WRITE16(IER, 0x0);
   3900 	POSTING_READ16(IER);
   3901 }
   3902 
   3903 static int i8xx_irq_postinstall(struct drm_device *dev)
   3904 {
   3905 	struct drm_i915_private *dev_priv = dev->dev_private;
   3906 
   3907 	I915_WRITE16(EMR,
   3908 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
   3909 
   3910 	/* Unmask the interrupts that we always want on. */
   3911 	dev_priv->irq_mask =
   3912 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3913 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3914 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   3915 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
   3916 	I915_WRITE16(IMR, dev_priv->irq_mask);
   3917 
   3918 	I915_WRITE16(IER,
   3919 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3920 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3921 		     I915_USER_INTERRUPT);
   3922 	POSTING_READ16(IER);
   3923 
    3924 	/* Interrupt setup is already guaranteed to be single-threaded; this is
   3925 	 * just to make the assert_spin_locked check happy. */
   3926 	spin_lock_irq(&dev_priv->irq_lock);
   3927 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3928 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3929 	spin_unlock_irq(&dev_priv->irq_lock);
   3930 
   3931 	return 0;
   3932 }
   3933 
   3934 /*
   3935  * Returns true when a page flip has completed.
   3936  */
   3937 static bool i8xx_handle_vblank(struct drm_device *dev,
   3938 			       int plane, int pipe, u32 iir)
   3939 {
   3940 	struct drm_i915_private *dev_priv = dev->dev_private;
   3941 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
   3942 
   3943 	if (!intel_pipe_handle_vblank(dev, pipe))
   3944 		return false;
   3945 
   3946 	if ((iir & flip_pending) == 0)
   3947 		goto check_page_flip;
   3948 
   3949 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
   3950 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
   3951 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
   3952 	 * the flip is completed (no longer pending). Since this doesn't raise
   3953 	 * an interrupt per se, we watch for the change at vblank.
   3954 	 */
   3955 	if (I915_READ16(ISR) & flip_pending)
   3956 		goto check_page_flip;
   3957 
   3958 	intel_prepare_page_flip(dev, plane);
   3959 	intel_finish_page_flip(dev, pipe);
   3960 	return true;
   3961 
   3962 check_page_flip:
   3963 	intel_check_page_flip(dev, pipe);
   3964 	return false;
   3965 }
   3966 
   3967 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
   3968 {
   3969 	struct drm_device *dev = arg;
   3970 	struct drm_i915_private *dev_priv = dev->dev_private;
   3971 	u16 iir, new_iir;
   3972 	u32 pipe_stats[2];
   3973 	int pipe;
   3974 	u16 flip_mask =
   3975 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   3976 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
   3977 
   3978 	if (!intel_irqs_enabled(dev_priv))
   3979 		return IRQ_NONE;
   3980 
   3981 	iir = I915_READ16(IIR);
   3982 	if (iir == 0)
   3983 		return IRQ_NONE;
   3984 
   3985 	while (iir & ~flip_mask) {
   3986 		/* Can't rely on pipestat interrupt bit in iir as it might
   3987 		 * have been cleared after the pipestat interrupt was received.
   3988 		 * It doesn't set the bit in iir again, but it still produces
   3989 		 * interrupts (for non-MSI).
   3990 		 */
   3991 		spin_lock(&dev_priv->irq_lock);
   3992 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
   3993 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
   3994 
   3995 		for_each_pipe(dev_priv, pipe) {
   3996 			int reg = PIPESTAT(pipe);
   3997 			pipe_stats[pipe] = I915_READ(reg);
   3998 
   3999 			/*
   4000 			 * Clear the PIPE*STAT regs before the IIR
   4001 			 */
   4002 			if (pipe_stats[pipe] & 0x8000ffff)
   4003 				I915_WRITE(reg, pipe_stats[pipe]);
   4004 		}
   4005 		spin_unlock(&dev_priv->irq_lock);
   4006 
   4007 		I915_WRITE16(IIR, iir & ~flip_mask);
   4008 		new_iir = I915_READ16(IIR); /* Flush posted writes */
   4009 
   4010 		if (iir & I915_USER_INTERRUPT)
   4011 			notify_ring(&dev_priv->ring[RCS]);
   4012 
   4013 		for_each_pipe(dev_priv, pipe) {
   4014 			int plane = pipe;
   4015 			if (HAS_FBC(dev))
   4016 				plane = !plane;
   4017 
   4018 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
   4019 			    i8xx_handle_vblank(dev, plane, pipe, iir))
   4020 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
   4021 
   4022 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
   4023 				i9xx_pipe_crc_irq_handler(dev, pipe);
   4024 
   4025 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
   4026 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
   4027 								    pipe);
   4028 		}
   4029 
   4030 		iir = new_iir;
   4031 	}
   4032 
   4033 	return IRQ_HANDLED;
   4034 }
   4035 
   4036 static void i8xx_irq_uninstall(struct drm_device * dev)
   4037 {
   4038 	struct drm_i915_private *dev_priv = dev->dev_private;
   4039 	int pipe;
   4040 
   4041 	for_each_pipe(dev_priv, pipe) {
   4042 		/* Clear enable bits; then clear status bits */
   4043 		I915_WRITE(PIPESTAT(pipe), 0);
   4044 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
   4045 	}
   4046 	I915_WRITE16(IMR, 0xffff);
   4047 	I915_WRITE16(IER, 0x0);
   4048 	I915_WRITE16(IIR, I915_READ16(IIR));
   4049 }
   4050 
   4051 static void i915_irq_preinstall(struct drm_device * dev)
   4052 {
   4053 	struct drm_i915_private *dev_priv = dev->dev_private;
   4054 	int pipe;
   4055 
   4056 	if (I915_HAS_HOTPLUG(dev)) {
   4057 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   4058 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   4059 	}
   4060 
   4061 	I915_WRITE16(HWSTAM, 0xeffe);
   4062 	for_each_pipe(dev_priv, pipe)
   4063 		I915_WRITE(PIPESTAT(pipe), 0);
   4064 	I915_WRITE(IMR, 0xffffffff);
   4065 	I915_WRITE(IER, 0x0);
   4066 	POSTING_READ(IER);
   4067 }
   4068 
   4069 static int i915_irq_postinstall(struct drm_device *dev)
   4070 {
   4071 	struct drm_i915_private *dev_priv = dev->dev_private;
   4072 	u32 enable_mask;
   4073 
   4074 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
   4075 
   4076 	/* Unmask the interrupts that we always want on. */
   4077 	dev_priv->irq_mask =
   4078 		~(I915_ASLE_INTERRUPT |
   4079 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   4080 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   4081 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   4082 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
   4083 
   4084 	enable_mask =
   4085 		I915_ASLE_INTERRUPT |
   4086 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   4087 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   4088 		I915_USER_INTERRUPT;
   4089 
   4090 	if (I915_HAS_HOTPLUG(dev)) {
   4091 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   4092 		POSTING_READ(PORT_HOTPLUG_EN);
   4093 
   4094 		/* Enable in IER... */
   4095 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
   4096 		/* and unmask in IMR */
   4097 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
   4098 	}
   4099 
   4100 	I915_WRITE(IMR, dev_priv->irq_mask);
   4101 	I915_WRITE(IER, enable_mask);
   4102 	POSTING_READ(IER);
   4103 
   4104 	i915_enable_asle_pipestat(dev);
   4105 
    4106 	/* Interrupt setup is already guaranteed to be single-threaded; this is
   4107 	 * just to make the assert_spin_locked check happy. */
   4108 	spin_lock_irq(&dev_priv->irq_lock);
   4109 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
   4110 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
   4111 	spin_unlock_irq(&dev_priv->irq_lock);
   4112 
   4113 	return 0;
   4114 }
   4115 
   4116 /*
   4117  * Returns true when a page flip has completed.
   4118  */
   4119 static bool i915_handle_vblank(struct drm_device *dev,
   4120 			       int plane, int pipe, u32 iir)
   4121 {
   4122 	struct drm_i915_private *dev_priv = dev->dev_private;
   4123 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
   4124 
   4125 	if (!intel_pipe_handle_vblank(dev, pipe))
   4126 		return false;
   4127 
   4128 	if ((iir & flip_pending) == 0)
   4129 		goto check_page_flip;
   4130 
   4131 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
   4132 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
   4133 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
   4134 	 * the flip is completed (no longer pending). Since this doesn't raise
   4135 	 * an interrupt per se, we watch for the change at vblank.
   4136 	 */
   4137 	if (I915_READ(ISR) & flip_pending)
   4138 		goto check_page_flip;
   4139 
   4140 	intel_prepare_page_flip(dev, plane);
   4141 	intel_finish_page_flip(dev, pipe);
   4142 	return true;
   4143 
   4144 check_page_flip:
   4145 	intel_check_page_flip(dev, pipe);
   4146 	return false;
   4147 }
   4148 
   4149 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
   4150 {
   4151 	struct drm_device *dev = arg;
   4152 	struct drm_i915_private *dev_priv = dev->dev_private;
   4153 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
   4154 	u32 flip_mask =
   4155 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   4156 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
   4157 	int pipe, ret = IRQ_NONE;
   4158 
   4159 	if (!intel_irqs_enabled(dev_priv))
   4160 		return IRQ_NONE;
   4161 
   4162 	iir = I915_READ(IIR);
   4163 	do {
   4164 		bool irq_received = (iir & ~flip_mask) != 0;
   4165 		bool blc_event = false;
   4166 
   4167 		/* Can't rely on pipestat interrupt bit in iir as it might
   4168 		 * have been cleared after the pipestat interrupt was received.
   4169 		 * It doesn't set the bit in iir again, but it still produces
   4170 		 * interrupts (for non-MSI).
   4171 		 */
   4172 		spin_lock(&dev_priv->irq_lock);
   4173 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
   4174 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
   4175 
   4176 		for_each_pipe(dev_priv, pipe) {
   4177 			int reg = PIPESTAT(pipe);
   4178 			pipe_stats[pipe] = I915_READ(reg);
   4179 
   4180 			/* Clear the PIPE*STAT regs before the IIR */
   4181 			if (pipe_stats[pipe] & 0x8000ffff) {
   4182 				I915_WRITE(reg, pipe_stats[pipe]);
   4183 				irq_received = true;
   4184 			}
   4185 		}
   4186 		spin_unlock(&dev_priv->irq_lock);
   4187 
   4188 		if (!irq_received)
   4189 			break;
   4190 
   4191 		/* Consume port.  Then clear IIR or we'll miss events */
   4192 		if (I915_HAS_HOTPLUG(dev) &&
   4193 		    iir & I915_DISPLAY_PORT_INTERRUPT)
   4194 			i9xx_hpd_irq_handler(dev);
   4195 
   4196 		I915_WRITE(IIR, iir & ~flip_mask);
   4197 		new_iir = I915_READ(IIR); /* Flush posted writes */
   4198 
   4199 		if (iir & I915_USER_INTERRUPT)
   4200 			notify_ring(&dev_priv->ring[RCS]);
   4201 
   4202 		for_each_pipe(dev_priv, pipe) {
   4203 			int plane = pipe;
   4204 			if (HAS_FBC(dev))
   4205 				plane = !plane;
   4206 
   4207 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
   4208 			    i915_handle_vblank(dev, plane, pipe, iir))
   4209 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
   4210 
   4211 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
   4212 				blc_event = true;
   4213 
   4214 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
   4215 				i9xx_pipe_crc_irq_handler(dev, pipe);
   4216 
   4217 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
   4218 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
   4219 								    pipe);
   4220 		}
   4221 
   4222 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
   4223 			intel_opregion_asle_intr(dev);
   4224 
   4225 		/* With MSI, interrupts are only generated when iir
   4226 		 * transitions from zero to nonzero.  If another bit got
   4227 		 * set while we were handling the existing iir bits, then
   4228 		 * we would never get another interrupt.
   4229 		 *
   4230 		 * This is fine on non-MSI as well, as if we hit this path
   4231 		 * we avoid exiting the interrupt handler only to generate
   4232 		 * another one.
   4233 		 *
   4234 		 * Note that for MSI this could cause a stray interrupt report
   4235 		 * if an interrupt landed in the time between writing IIR and
   4236 		 * the posting read.  This should be rare enough to never
   4237 		 * trigger the 99% of 100,000 interrupts test for disabling
   4238 		 * stray interrupts.
   4239 		 */
   4240 		ret = IRQ_HANDLED;
   4241 		iir = new_iir;
   4242 	} while (iir & ~flip_mask);
   4243 
   4244 	return ret;
   4245 }
   4246 
   4247 static void i915_irq_uninstall(struct drm_device * dev)
   4248 {
   4249 	struct drm_i915_private *dev_priv = dev->dev_private;
   4250 	int pipe;
   4251 
   4252 	if (I915_HAS_HOTPLUG(dev)) {
   4253 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   4254 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   4255 	}
   4256 
   4257 	I915_WRITE16(HWSTAM, 0xffff);
   4258 	for_each_pipe(dev_priv, pipe) {
   4259 		/* Clear enable bits; then clear status bits */
   4260 		I915_WRITE(PIPESTAT(pipe), 0);
   4261 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
   4262 	}
   4263 	I915_WRITE(IMR, 0xffffffff);
   4264 	I915_WRITE(IER, 0x0);
   4265 
   4266 	I915_WRITE(IIR, I915_READ(IIR));
   4267 }
   4268 
   4269 static void i965_irq_preinstall(struct drm_device * dev)
   4270 {
   4271 	struct drm_i915_private *dev_priv = dev->dev_private;
   4272 	int pipe;
   4273 
   4274 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   4275 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   4276 
   4277 	I915_WRITE(HWSTAM, 0xeffe);
   4278 	for_each_pipe(dev_priv, pipe)
   4279 		I915_WRITE(PIPESTAT(pipe), 0);
   4280 	I915_WRITE(IMR, 0xffffffff);
   4281 	I915_WRITE(IER, 0x0);
   4282 	POSTING_READ(IER);
   4283 }
   4284 
   4285 static int i965_irq_postinstall(struct drm_device *dev)
   4286 {
   4287 	struct drm_i915_private *dev_priv = dev->dev_private;
   4288 	u32 enable_mask;
   4289 	u32 error_mask;
   4290 
   4291 	/* Unmask the interrupts that we always want on. */
   4292 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
   4293 			       I915_DISPLAY_PORT_INTERRUPT |
   4294 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   4295 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   4296 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   4297 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
   4298 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
   4299 
   4300 	enable_mask = ~dev_priv->irq_mask;
   4301 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   4302 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
   4303 	enable_mask |= I915_USER_INTERRUPT;
   4304 
   4305 	if (IS_G4X(dev))
   4306 		enable_mask |= I915_BSD_USER_INTERRUPT;
   4307 
    4308 	/* Interrupt setup is already guaranteed to be single-threaded; this is
   4309 	 * just to make the assert_spin_locked check happy. */
   4310 	spin_lock_irq(&dev_priv->irq_lock);
   4311 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
   4312 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
   4313 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
   4314 	spin_unlock_irq(&dev_priv->irq_lock);
   4315 
   4316 	/*
   4317 	 * Enable some error detection, note the instruction error mask
   4318 	 * bit is reserved, so we leave it masked.
   4319 	 */
   4320 	if (IS_G4X(dev)) {
   4321 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
   4322 			       GM45_ERROR_MEM_PRIV |
   4323 			       GM45_ERROR_CP_PRIV |
   4324 			       I915_ERROR_MEMORY_REFRESH);
   4325 	} else {
   4326 		error_mask = ~(I915_ERROR_PAGE_TABLE |
   4327 			       I915_ERROR_MEMORY_REFRESH);
   4328 	}
   4329 	I915_WRITE(EMR, error_mask);
   4330 
   4331 	I915_WRITE(IMR, dev_priv->irq_mask);
   4332 	I915_WRITE(IER, enable_mask);
   4333 	POSTING_READ(IER);
   4334 
   4335 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   4336 	POSTING_READ(PORT_HOTPLUG_EN);
   4337 
   4338 	i915_enable_asle_pipestat(dev);
   4339 
   4340 	return 0;
   4341 }
   4342 
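/*
 * Program PORT_HOTPLUG_EN for the legacy (gen3/4 and VLV/CHV style) hotplug
 * block; the caller holds dev_priv->irq_lock, as asserted below.
 */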
   4343 static void i915_hpd_irq_setup(struct drm_device *dev)
   4344 {
   4345 	struct drm_i915_private *dev_priv = dev->dev_private;
   4346 	u32 hotplug_en;
   4347 
   4348 	assert_spin_locked(&dev_priv->irq_lock);
   4349 
   4350 	/* Note HDMI and DP share hotplug bits */
   4351 	/* enable bits are the same for all generations */
   4352 	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
   4353 	/* Programming the CRT detection parameters tends
   4354 	   to generate a spurious hotplug event about three
   4355 	   seconds later.  So just do it once.
   4356 	*/
   4357 	if (IS_G4X(dev))
   4358 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
   4359 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
   4360 
   4361 	/* Ignore TV since it's buggy */
   4362 	i915_hotplug_interrupt_update_locked(dev_priv,
   4363 					     HOTPLUG_INT_EN_MASK |
   4364 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
   4365 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
   4366 					     hotplug_en);
   4367 }
   4368 
   4369 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
   4370 {
   4371 	struct drm_device *dev = arg;
   4372 	struct drm_i915_private *dev_priv = dev->dev_private;
   4373 	u32 iir, new_iir;
   4374 	u32 pipe_stats[I915_MAX_PIPES];
   4375 	int ret = IRQ_NONE, pipe;
   4376 	u32 flip_mask =
   4377 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
   4378 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
   4379 
   4380 	if (!intel_irqs_enabled(dev_priv))
   4381 		return IRQ_NONE;
   4382 
   4383 	iir = I915_READ(IIR);
   4384 
   4385 	for (;;) {
   4386 		bool irq_received = (iir & ~flip_mask) != 0;
   4387 		bool blc_event = false;
   4388 
   4389 		/* Can't rely on pipestat interrupt bit in iir as it might
   4390 		 * have been cleared after the pipestat interrupt was received.
   4391 		 * It doesn't set the bit in iir again, but it still produces
   4392 		 * interrupts (for non-MSI).
   4393 		 */
   4394 		spin_lock(&dev_priv->irq_lock);
   4395 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
   4396 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
   4397 
   4398 		for_each_pipe(dev_priv, pipe) {
   4399 			int reg = PIPESTAT(pipe);
   4400 			pipe_stats[pipe] = I915_READ(reg);
   4401 
   4402 			/*
   4403 			 * Clear the PIPE*STAT regs before the IIR
   4404 			 */
   4405 			if (pipe_stats[pipe] & 0x8000ffff) {
   4406 				I915_WRITE(reg, pipe_stats[pipe]);
   4407 				irq_received = true;
   4408 			}
   4409 		}
   4410 		spin_unlock(&dev_priv->irq_lock);
   4411 
   4412 		if (!irq_received)
   4413 			break;
   4414 
   4415 		ret = IRQ_HANDLED;
   4416 
   4417 		/* Consume port.  Then clear IIR or we'll miss events */
   4418 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
   4419 			i9xx_hpd_irq_handler(dev);
   4420 
   4421 		I915_WRITE(IIR, iir & ~flip_mask);
   4422 		new_iir = I915_READ(IIR); /* Flush posted writes */
   4423 
   4424 		if (iir & I915_USER_INTERRUPT)
   4425 			notify_ring(&dev_priv->ring[RCS]);
   4426 		if (iir & I915_BSD_USER_INTERRUPT)
   4427 			notify_ring(&dev_priv->ring[VCS]);
   4428 
   4429 		for_each_pipe(dev_priv, pipe) {
   4430 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
   4431 			    i915_handle_vblank(dev, pipe, pipe, iir))
   4432 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
   4433 
   4434 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
   4435 				blc_event = true;
   4436 
   4437 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
   4438 				i9xx_pipe_crc_irq_handler(dev, pipe);
   4439 
   4440 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
   4441 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   4442 		}
   4443 
   4444 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
   4445 			intel_opregion_asle_intr(dev);
   4446 
   4447 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
   4448 			gmbus_irq_handler(dev);
   4449 
   4450 		/* With MSI, interrupts are only generated when iir
   4451 		 * transitions from zero to nonzero.  If another bit got
   4452 		 * set while we were handling the existing iir bits, then
   4453 		 * we would never get another interrupt.
   4454 		 *
   4455 		 * This is fine on non-MSI as well, as if we hit this path
   4456 		 * we avoid exiting the interrupt handler only to generate
   4457 		 * another one.
   4458 		 *
   4459 		 * Note that for MSI this could cause a stray interrupt report
   4460 		 * if an interrupt landed in the time between writing IIR and
   4461 		 * the posting read.  This should be rare enough to never
   4462 		 * trigger the 99% of 100,000 interrupts test for disabling
   4463 		 * stray interrupts.
   4464 		 */
   4465 		iir = new_iir;
   4466 	}
   4467 
   4468 	return ret;
   4469 }
   4470 
   4471 static void i965_irq_uninstall(struct drm_device * dev)
   4472 {
   4473 	struct drm_i915_private *dev_priv = dev->dev_private;
   4474 	int pipe;
   4475 
   4476 	if (!dev_priv)
   4477 		return;
   4478 
   4479 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   4480 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   4481 
   4482 	I915_WRITE(HWSTAM, 0xffffffff);
   4483 	for_each_pipe(dev_priv, pipe)
   4484 		I915_WRITE(PIPESTAT(pipe), 0);
   4485 	I915_WRITE(IMR, 0xffffffff);
   4486 	I915_WRITE(IER, 0x0);
   4487 
   4488 	for_each_pipe(dev_priv, pipe)
   4489 		I915_WRITE(PIPESTAT(pipe),
   4490 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
   4491 	I915_WRITE(IIR, I915_READ(IIR));
   4492 }
   4493 
   4494 /**
   4495  * intel_irq_init - initializes irq support
   4496  * @dev_priv: i915 device instance
   4497  *
   4498  * This function initializes all the irq support including work items, timers
    4499  * and all the vtables. It does not set up the interrupt itself, though.
   4500  */
   4501 void intel_irq_init(struct drm_i915_private *dev_priv)
   4502 {
   4503 	struct drm_device *dev = dev_priv->dev;
   4504 
   4505 	intel_hpd_init_work(dev_priv);
   4506 
   4507 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
   4508 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
   4509 
   4510 	/* Let's track the enabled rps events */
   4511 	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
   4512 		/* WaGsvRC0ResidencyMethod:vlv */
   4513 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
   4514 	else
   4515 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
   4516 
   4517 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
   4518 			  i915_hangcheck_elapsed);
   4519 
   4520 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
   4521 
   4522 	if (IS_GEN2(dev_priv)) {
   4523 		dev->max_vblank_count = 0;
   4524 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
   4525 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
   4526 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
   4527 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
   4528 	} else {
   4529 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
   4530 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
   4531 	}
   4532 
   4533 	/*
   4534 	 * Opt out of the vblank disable timer on everything except gen2.
   4535 	 * Gen2 doesn't have a hardware frame counter and so depends on
    4536 	 * vblank interrupts to produce sane vblank sequence numbers.
   4537 	 */
   4538 	if (!IS_GEN2(dev_priv))
   4539 		dev->vblank_disable_immediate = true;
   4540 
   4541 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
   4542 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
   4543 
   4544 	if (IS_CHERRYVIEW(dev_priv)) {
   4545 		dev->driver->irq_handler = cherryview_irq_handler;
   4546 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
   4547 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
   4548 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
   4549 		dev->driver->enable_vblank = valleyview_enable_vblank;
   4550 		dev->driver->disable_vblank = valleyview_disable_vblank;
   4551 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
   4552 	} else if (IS_VALLEYVIEW(dev_priv)) {
   4553 		dev->driver->irq_handler = valleyview_irq_handler;
   4554 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
   4555 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
   4556 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
   4557 		dev->driver->enable_vblank = valleyview_enable_vblank;
   4558 		dev->driver->disable_vblank = valleyview_disable_vblank;
   4559 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
   4560 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
   4561 		dev->driver->irq_handler = gen8_irq_handler;
   4562 		dev->driver->irq_preinstall = gen8_irq_reset;
   4563 		dev->driver->irq_postinstall = gen8_irq_postinstall;
   4564 		dev->driver->irq_uninstall = gen8_irq_uninstall;
   4565 		dev->driver->enable_vblank = gen8_enable_vblank;
   4566 		dev->driver->disable_vblank = gen8_disable_vblank;
   4567 		if (IS_BROXTON(dev))
   4568 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
   4569 		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
   4570 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
   4571 		else
   4572 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
   4573 	} else if (HAS_PCH_SPLIT(dev)) {
   4574 		dev->driver->irq_handler = ironlake_irq_handler;
   4575 		dev->driver->irq_preinstall = ironlake_irq_reset;
   4576 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
   4577 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
   4578 		dev->driver->enable_vblank = ironlake_enable_vblank;
   4579 		dev->driver->disable_vblank = ironlake_disable_vblank;
   4580 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
   4581 	} else {
   4582 		if (INTEL_INFO(dev_priv)->gen == 2) {
   4583 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
   4584 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
   4585 			dev->driver->irq_handler = i8xx_irq_handler;
   4586 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
   4587 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
   4588 			dev->driver->irq_preinstall = i915_irq_preinstall;
   4589 			dev->driver->irq_postinstall = i915_irq_postinstall;
   4590 			dev->driver->irq_uninstall = i915_irq_uninstall;
   4591 			dev->driver->irq_handler = i915_irq_handler;
   4592 		} else {
   4593 			dev->driver->irq_preinstall = i965_irq_preinstall;
   4594 			dev->driver->irq_postinstall = i965_irq_postinstall;
   4595 			dev->driver->irq_uninstall = i965_irq_uninstall;
   4596 			dev->driver->irq_handler = i965_irq_handler;
   4597 		}
   4598 		if (I915_HAS_HOTPLUG(dev_priv))
   4599 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
   4600 		dev->driver->enable_vblank = i915_enable_vblank;
   4601 		dev->driver->disable_vblank = i915_disable_vblank;
   4602 	}
   4603 }
   4604 
   4605 /**
   4606  * intel_irq_install - enables the hardware interrupt
   4607  * @dev_priv: i915 device instance
   4608  *
   4609  * This function enables the hardware interrupt handling, but leaves the hotplug
   4610  * handling still disabled. It is called after intel_irq_init().
   4611  *
   4612  * In the driver load and resume code we need working interrupts in a few places
   4613  * but don't want to deal with the hassle of concurrent probe and hotplug
   4614  * workers. Hence the split into this two-stage approach.
   4615  */
   4616 int intel_irq_install(struct drm_i915_private *dev_priv)
   4617 {
   4618 	/*
   4619 	 * We enable some interrupt sources in our postinstall hooks, so mark
   4620 	 * interrupts as enabled _before_ actually enabling them to avoid
   4621 	 * special cases in our ordering checks.
   4622 	 */
   4623 	dev_priv->pm.irqs_enabled = true;
   4624 
   4625 #ifdef __NetBSD__
   4626 	return drm_irq_install(dev_priv->dev);
   4627 #else
   4628 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
   4629 #endif
   4630 }
   4631 
   4632 /**
    4633  * intel_irq_uninstall - finalizes all irq handling
   4634  * @dev_priv: i915 device instance
   4635  *
   4636  * This stops interrupt and hotplug handling and unregisters and frees all
   4637  * resources acquired in the init functions.
   4638  */
   4639 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
   4640 {
   4641 	drm_irq_uninstall(dev_priv->dev);
   4642 	intel_hpd_cancel_work(dev_priv);
   4643 	dev_priv->pm.irqs_enabled = false;
   4644 }
   4645 
   4646 /**
   4647  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
   4648  * @dev_priv: i915 device instance
   4649  *
   4650  * This function is used to disable interrupts at runtime, both in the runtime
   4651  * pm and the system suspend/resume code.
   4652  */
   4653 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
   4654 {
   4655 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
   4656 	dev_priv->pm.irqs_enabled = false;
   4657 	synchronize_irq(dev_priv->dev->irq);
   4658 }
   4659 
   4660 /**
   4661  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
   4662  * @dev_priv: i915 device instance
   4663  *
   4664  * This function is used to enable interrupts at runtime, both in the runtime
   4665  * pm and the system suspend/resume code.
   4666  */
   4667 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
   4668 {
   4669 	dev_priv->pm.irqs_enabled = true;
   4670 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
   4671 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
   4672 }
   4673