/*	$NetBSD: i915_irq.c,v 1.20 2021/12/18 23:45:28 riastradh Exp $	*/

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.20 2021/12/18 23:45:28 riastradh Exp $");

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
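
/*
 * Illustrative sketch only, not a hook from this file: an install path
 * would typically quiesce with the reset helper, configure whatever
 * state must be set up with interrupts off, and then arm the registers
 * with the init helper, which first asserts that IIR drained cleanly.
 * The DE register names and enable_mask below are stand-ins:
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	... set up state with interrupts off ...
 *	gen3_irq_init(uncore, DEIMR, ~enable_mask, DEIER, enable_mask, DEIIR);
 */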

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
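
/*
 * For clarity, a minimal sketch of how a caller can drive the
 * mask/enable pair above: naming a bit in both arguments unmasks it,
 * naming it only in @interrupt_mask masks it.  These wrappers are
 * illustrative, not helpers defined in this file:
 *
 *	static inline void
 *	example_ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
 *	{
 *		ilk_update_display_irq(i915, bits, bits);
 *	}
 *
 *	static inline void
 *	example_ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
 *	{
 *		ilk_update_display_irq(i915, bits, 0);
 *	}
 */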

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
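
/*
 * Worked example of the layout assumed above: PIPESTAT keeps status
 * bits in its low 16 bits and the matching enable bits 16 positions up,
 * so a status_mask of 0x0004 becomes an enable_mask of 0x00040000
 * before the VLV sprite/PSR special cases are folded in.
 */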

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
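
/*
 * Worked example of the composition above, with made-up values: if the
 * high field reads 0x12 on both passes and the low field reads 0x34,
 * the cooked counter is (0x12 << 8 | 0x34) = 0x1234, plus one more if
 * the pixel counter shows we are already past vblank start, all
 * truncated to 24 bits.
 */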

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * either because the timings are driven from the PORT or
 * because of issues with scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
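
/*
 * Worked example of the division above, assuming (for illustration) a
 * 1 MHz timestamp counter, a 148500 kHz pixel clock and an htotal of
 * 2200: one line takes 2200 pixels / 148.5 MHz ~= 14.8us, so a delta
 * of ~1480 counter ticks since the last vblank start maps to roughly
 * scanline 100, which is then offset by vblank_start and wrapped at
 * vtotal.
 */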

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
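
/*
 * Worked example of the normalization above, using small made-up
 * numbers (vbl_start = 3, vbl_end = 6, vtotal = 6 in scanline units):
 * a raw position of 4 lies in vblank and becomes 4 - 6 = -2, counting
 * up towards 0 at vblank end, while a raw position of 1 in the active
 * area stays at 1 + 6 - 6 = 1.
 */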

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
#ifndef __NetBSD__		/* XXX kobject uevent...? */
	char *parity_event[6];
#endif
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

#ifndef __NetBSD__		/* XXX kobject uevent...? */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

#ifndef __NetBSD__		/* XXX kobject uevent...? */
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
#endif
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	case HPD_PORT_B:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	case HPD_PORT_C:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
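
/*
 * Illustrative only: because the pin and long masks are only ever ORed
 * into, a handler with two trigger registers can accumulate both before
 * dispatching.  All names below are placeholders, not code from this
 * file:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   ddi_trigger, ddi_hotplug_reg, hpd_table,
 *			   ddi_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   tc_trigger, tc_hotplug_reg, hpd_table,
 *			   tc_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */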

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
#ifdef __NetBSD__
	spin_lock(&dev_priv->gmbus_wait_lock);
	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
	    &dev_priv->gmbus_wait_lock);
	spin_unlock(&dev_priv->gmbus_wait_lock);
#else
	wake_up_all(&dev_priv->gmbus_wait_queue);
#endif
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
#ifdef __NetBSD__
	spin_lock(&dev_priv->gmbus_wait_lock);
	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
	    &dev_priv->gmbus_wait_lock);
	spin_unlock(&dev_priv->gmbus_wait_lock);
#else
	wake_up_all(&dev_priv->gmbus_wait_queue);
#endif
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
   1534 
   1535 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
   1536 {
   1537 	struct drm_i915_private *dev_priv = arg;
   1538 	irqreturn_t ret = IRQ_NONE;
   1539 
   1540 	if (!intel_irqs_enabled(dev_priv))
   1541 		return IRQ_NONE;
   1542 
   1543 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
   1544 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   1545 
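         	/*
         	 * Single pass: the do { } while (0) form only exists so that
         	 * 'break' can bail out early once all the IIRs read back zero.
         	 */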
   1546 	do {
   1547 		u32 iir, gt_iir, pm_iir;
   1548 		u32 pipe_stats[I915_MAX_PIPES] = {};
   1549 		u32 hotplug_status = 0;
   1550 		u32 ier = 0;
   1551 
   1552 		gt_iir = I915_READ(GTIIR);
   1553 		pm_iir = I915_READ(GEN6_PMIIR);
   1554 		iir = I915_READ(VLV_IIR);
   1555 
   1556 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
   1557 			break;
   1558 
   1559 		ret = IRQ_HANDLED;
   1560 
   1561 		/*
   1562 		 * Theory on interrupt generation, based on empirical evidence:
   1563 		 *
   1564 		 * x = ((VLV_IIR & VLV_IER) ||
   1565 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
   1566 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
   1567 		 *
   1568 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
   1569 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
   1570 		 * guarantee the CPU interrupt will be raised again even if we
   1571 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
   1572 		 * bits this time around.
   1573 		 */
   1574 		I915_WRITE(VLV_MASTER_IER, 0);
   1575 		ier = I915_READ(VLV_IER);
   1576 		I915_WRITE(VLV_IER, 0);
   1577 
   1578 		if (gt_iir)
   1579 			I915_WRITE(GTIIR, gt_iir);
   1580 		if (pm_iir)
   1581 			I915_WRITE(GEN6_PMIIR, pm_iir);
   1582 
   1583 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
   1584 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
   1585 
   1586 		/* Call regardless, as some status bits might not be
   1587 		 * signalled in iir */
   1588 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
   1589 
   1590 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
   1591 			   I915_LPE_PIPE_B_INTERRUPT))
   1592 			intel_lpe_audio_irq_handler(dev_priv);
   1593 
   1594 		/*
   1595 		 * VLV_IIR is single buffered, and reflects the level
   1596 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
   1597 		 */
   1598 		if (iir)
   1599 			I915_WRITE(VLV_IIR, iir);
   1600 
   1601 		I915_WRITE(VLV_IER, ier);
   1602 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
   1603 
   1604 		if (gt_iir)
   1605 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
   1606 		if (pm_iir)
   1607 			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
   1608 
   1609 		if (hotplug_status)
   1610 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
   1611 
   1612 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
   1613 	} while (0);
   1614 
   1615 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   1616 
   1617 	return ret;
   1618 }
   1619 
   1620 static irqreturn_t cherryview_irq_handler(DRM_IRQ_ARGS)
   1621 {
   1622 	struct drm_i915_private *dev_priv = arg;
   1623 	irqreturn_t ret = IRQ_NONE;
   1624 
   1625 	if (!intel_irqs_enabled(dev_priv))
   1626 		return IRQ_NONE;
   1627 
   1628 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
   1629 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   1630 
   1631 	do {
   1632 		u32 master_ctl, iir;
   1633 		u32 pipe_stats[I915_MAX_PIPES] = {};
   1634 		u32 hotplug_status = 0;
   1635 		u32 gt_iir[4];
   1636 		u32 ier = 0;
   1637 
   1638 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
   1639 		iir = I915_READ(VLV_IIR);
   1640 
   1641 		if (master_ctl == 0 && iir == 0)
   1642 			break;
   1643 
   1644 		ret = IRQ_HANDLED;
   1645 
   1646 		/*
   1647 		 * Theory on interrupt generation, based on empirical evidence:
   1648 		 *
   1649 		 * x = ((VLV_IIR & VLV_IER) ||
   1650 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
   1651 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
   1652 		 *
   1653 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
   1654 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
   1655 		 * guarantee the CPU interrupt will be raised again even if we
   1656 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
   1657 		 * bits this time around.
   1658 		 */
   1659 		I915_WRITE(GEN8_MASTER_IRQ, 0);
   1660 		ier = I915_READ(VLV_IER);
   1661 		I915_WRITE(VLV_IER, 0);
   1662 
   1663 		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
   1664 
   1665 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
   1666 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
   1667 
   1668 		/* Call regardless, as some status bits might not be
   1669 		 * signalled in iir */
   1670 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
   1671 
   1672 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
   1673 			   I915_LPE_PIPE_B_INTERRUPT |
   1674 			   I915_LPE_PIPE_C_INTERRUPT))
   1675 			intel_lpe_audio_irq_handler(dev_priv);
   1676 
   1677 		/*
   1678 		 * VLV_IIR is single buffered, and reflects the level
   1679 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
   1680 		 */
   1681 		if (iir)
   1682 			I915_WRITE(VLV_IIR, iir);
   1683 
   1684 		I915_WRITE(VLV_IER, ier);
   1685 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
   1686 
   1687 		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
   1688 
   1689 		if (hotplug_status)
   1690 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
   1691 
   1692 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
   1693 	} while (0);
   1694 
   1695 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   1696 
   1697 	return ret;
   1698 }
   1699 
   1700 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
   1701 				u32 hotplug_trigger,
   1702 				const u32 hpd[HPD_NUM_PINS])
   1703 {
   1704 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
   1705 
   1706 	/*
   1707 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
   1708 	 * unless we touch the hotplug register, even if hotplug_trigger is
   1709 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
   1710 	 * errors.
   1711 	 */
   1712 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
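         	/*
         	 * No trigger pending: mask off all latched status bits so the
         	 * ack write below touches the register without clearing anything.
         	 */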
   1713 	if (!hotplug_trigger) {
   1714 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
   1715 			PORTD_HOTPLUG_STATUS_MASK |
   1716 			PORTC_HOTPLUG_STATUS_MASK |
   1717 			PORTB_HOTPLUG_STATUS_MASK;
   1718 		dig_hotplug_reg &= ~mask;
   1719 	}
   1720 
   1721 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
   1722 	if (!hotplug_trigger)
   1723 		return;
   1724 
   1725 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
   1726 			   dig_hotplug_reg, hpd,
   1727 			   pch_port_hotplug_long_detect);
   1728 
   1729 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
   1730 }
   1731 
   1732 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
   1733 {
   1734 	enum pipe pipe;
   1735 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
   1736 
   1737 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
   1738 
   1739 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
   1740 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
   1741 			       SDE_AUDIO_POWER_SHIFT);
    1742 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
   1743 				 port_name(port));
   1744 	}
   1745 
   1746 	if (pch_iir & SDE_AUX_MASK)
   1747 		dp_aux_irq_handler(dev_priv);
   1748 
   1749 	if (pch_iir & SDE_GMBUS)
   1750 		gmbus_irq_handler(dev_priv);
   1751 
   1752 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
   1753 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
   1754 
   1755 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
   1756 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
   1757 
   1758 	if (pch_iir & SDE_POISON)
   1759 		DRM_ERROR("PCH poison interrupt\n");
   1760 
   1761 	if (pch_iir & SDE_FDI_MASK)
   1762 		for_each_pipe(dev_priv, pipe)
   1763 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
   1764 					 pipe_name(pipe),
   1765 					 I915_READ(FDI_RX_IIR(pipe)));
   1766 
   1767 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
   1768 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
   1769 
   1770 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
   1771 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
   1772 
   1773 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
   1774 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
   1775 
   1776 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
   1777 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
   1778 }
   1779 
   1780 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
   1781 {
   1782 	u32 err_int = I915_READ(GEN7_ERR_INT);
   1783 	enum pipe pipe;
   1784 
   1785 	if (err_int & ERR_INT_POISON)
   1786 		DRM_ERROR("Poison interrupt\n");
   1787 
   1788 	for_each_pipe(dev_priv, pipe) {
   1789 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
   1790 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   1791 
   1792 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
   1793 			if (IS_IVYBRIDGE(dev_priv))
   1794 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
   1795 			else
   1796 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
   1797 		}
   1798 	}
   1799 
   1800 	I915_WRITE(GEN7_ERR_INT, err_int);
   1801 }
   1802 
   1803 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
   1804 {
   1805 	u32 serr_int = I915_READ(SERR_INT);
   1806 	enum pipe pipe;
   1807 
   1808 	if (serr_int & SERR_INT_POISON)
   1809 		DRM_ERROR("PCH poison interrupt\n");
   1810 
   1811 	for_each_pipe(dev_priv, pipe)
   1812 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
   1813 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
   1814 
   1815 	I915_WRITE(SERR_INT, serr_int);
   1816 }
   1817 
   1818 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
   1819 {
   1820 	enum pipe pipe;
   1821 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
   1822 
   1823 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
   1824 
   1825 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
   1826 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
   1827 			       SDE_AUDIO_POWER_SHIFT_CPT);
   1828 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
   1829 				 port_name(port));
   1830 	}
   1831 
   1832 	if (pch_iir & SDE_AUX_MASK_CPT)
   1833 		dp_aux_irq_handler(dev_priv);
   1834 
   1835 	if (pch_iir & SDE_GMBUS_CPT)
   1836 		gmbus_irq_handler(dev_priv);
   1837 
   1838 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
   1839 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
   1840 
   1841 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
   1842 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
   1843 
   1844 	if (pch_iir & SDE_FDI_MASK_CPT)
   1845 		for_each_pipe(dev_priv, pipe)
   1846 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
   1847 					 pipe_name(pipe),
   1848 					 I915_READ(FDI_RX_IIR(pipe)));
   1849 
   1850 	if (pch_iir & SDE_ERROR_CPT)
   1851 		cpt_serr_int_handler(dev_priv);
   1852 }
   1853 
   1854 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
   1855 {
   1856 	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
   1857 	u32 pin_mask = 0, long_mask = 0;
    1858 	long_pulse_detect_func tc_port_hotplug_long_detect;
   1859 	const u32 *pins;
   1860 
   1861 	if (HAS_PCH_TGP(dev_priv)) {
   1862 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
   1863 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
   1864 		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
   1865 		pins = hpd_tgp;
   1866 	} else if (HAS_PCH_JSP(dev_priv)) {
   1867 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
   1868 		tc_hotplug_trigger = 0;
   1869 		pins = hpd_tgp;
   1870 	} else if (HAS_PCH_MCC(dev_priv)) {
   1871 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
   1872 		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
   1873 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
   1874 		pins = hpd_icp;
   1875 	} else {
   1876 		WARN(!HAS_PCH_ICP(dev_priv),
   1877 		     "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
   1878 
   1879 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
   1880 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
   1881 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
   1882 		pins = hpd_icp;
   1883 	}
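         	/*
         	 * Note that tc_port_hotplug_long_detect is deliberately left
         	 * unset on JSP: tc_hotplug_trigger is always zero there, so the
         	 * callback is never invoked.
         	 */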
   1884 
   1885 	if (ddi_hotplug_trigger) {
   1886 		u32 dig_hotplug_reg;
   1887 
   1888 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
   1889 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
   1890 
   1891 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
   1892 				   ddi_hotplug_trigger,
   1893 				   dig_hotplug_reg, pins,
   1894 				   icp_ddi_port_hotplug_long_detect);
   1895 	}
   1896 
   1897 	if (tc_hotplug_trigger) {
   1898 		u32 dig_hotplug_reg;
   1899 
   1900 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
   1901 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
   1902 
   1903 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
   1904 				   tc_hotplug_trigger,
   1905 				   dig_hotplug_reg, pins,
   1906 				   tc_port_hotplug_long_detect);
   1907 	}
   1908 
   1909 	if (pin_mask)
   1910 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
   1911 
   1912 	if (pch_iir & SDE_GMBUS_ICP)
   1913 		gmbus_irq_handler(dev_priv);
   1914 }
   1915 
   1916 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
   1917 {
   1918 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
   1919 		~SDE_PORTE_HOTPLUG_SPT;
   1920 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
   1921 	u32 pin_mask = 0, long_mask = 0;
   1922 
   1923 	if (hotplug_trigger) {
   1924 		u32 dig_hotplug_reg;
   1925 
   1926 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
   1927 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
   1928 
   1929 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
   1930 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
   1931 				   spt_port_hotplug_long_detect);
   1932 	}
   1933 
   1934 	if (hotplug2_trigger) {
   1935 		u32 dig_hotplug_reg;
   1936 
   1937 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
   1938 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
   1939 
   1940 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
   1941 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
   1942 				   spt_port_hotplug2_long_detect);
   1943 	}
   1944 
   1945 	if (pin_mask)
   1946 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
   1947 
   1948 	if (pch_iir & SDE_GMBUS_CPT)
   1949 		gmbus_irq_handler(dev_priv);
   1950 }
   1951 
   1952 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
   1953 				u32 hotplug_trigger,
   1954 				const u32 hpd[HPD_NUM_PINS])
   1955 {
   1956 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
   1957 
   1958 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
   1959 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
   1960 
   1961 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
   1962 			   dig_hotplug_reg, hpd,
   1963 			   ilk_port_hotplug_long_detect);
   1964 
   1965 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
   1966 }
   1967 
   1968 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
   1969 				    u32 de_iir)
   1970 {
   1971 	enum pipe pipe;
   1972 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
   1973 
   1974 	if (hotplug_trigger)
   1975 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
   1976 
   1977 	if (de_iir & DE_AUX_CHANNEL_A)
   1978 		dp_aux_irq_handler(dev_priv);
   1979 
   1980 	if (de_iir & DE_GSE)
   1981 		intel_opregion_asle_intr(dev_priv);
   1982 
   1983 	if (de_iir & DE_POISON)
   1984 		DRM_ERROR("Poison interrupt\n");
   1985 
   1986 	for_each_pipe(dev_priv, pipe) {
   1987 		if (de_iir & DE_PIPE_VBLANK(pipe))
   1988 			drm_handle_vblank(&dev_priv->drm, pipe);
   1989 
   1990 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
   1991 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   1992 
   1993 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
   1994 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
   1995 	}
   1996 
   1997 	/* check event from PCH */
   1998 	if (de_iir & DE_PCH_EVENT) {
   1999 		u32 pch_iir = I915_READ(SDEIIR);
   2000 
   2001 		if (HAS_PCH_CPT(dev_priv))
   2002 			cpt_irq_handler(dev_priv, pch_iir);
   2003 		else
   2004 			ibx_irq_handler(dev_priv, pch_iir);
   2005 
   2006 		/* should clear PCH hotplug event before clear CPU irq */
   2007 		I915_WRITE(SDEIIR, pch_iir);
   2008 	}
   2009 
    2010 	if (IS_GEN(dev_priv, 5) && (de_iir & DE_PCU_EVENT))
   2011 		gen5_rps_irq_handler(&dev_priv->gt.rps);
   2012 }
   2013 
   2014 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
   2015 				    u32 de_iir)
   2016 {
   2017 	enum pipe pipe;
   2018 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
   2019 
   2020 	if (hotplug_trigger)
   2021 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
   2022 
   2023 	if (de_iir & DE_ERR_INT_IVB)
   2024 		ivb_err_int_handler(dev_priv);
   2025 
   2026 	if (de_iir & DE_EDP_PSR_INT_HSW) {
   2027 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
   2028 
   2029 		intel_psr_irq_handler(dev_priv, psr_iir);
   2030 		I915_WRITE(EDP_PSR_IIR, psr_iir);
   2031 	}
   2032 
   2033 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
   2034 		dp_aux_irq_handler(dev_priv);
   2035 
   2036 	if (de_iir & DE_GSE_IVB)
   2037 		intel_opregion_asle_intr(dev_priv);
   2038 
   2039 	for_each_pipe(dev_priv, pipe) {
    2040 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
   2041 			drm_handle_vblank(&dev_priv->drm, pipe);
   2042 	}
   2043 
   2044 	/* check event from PCH */
   2045 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
   2046 		u32 pch_iir = I915_READ(SDEIIR);
   2047 
   2048 		cpt_irq_handler(dev_priv, pch_iir);
   2049 
   2050 		/* clear PCH hotplug event before clear CPU irq */
   2051 		I915_WRITE(SDEIIR, pch_iir);
   2052 	}
   2053 }
   2054 
   2055 /*
   2056  * To handle irqs with the minimum potential races with fresh interrupts, we:
   2057  * 1 - Disable Master Interrupt Control.
   2058  * 2 - Find the source(s) of the interrupt.
   2059  * 3 - Clear the Interrupt Identity bits (IIR).
   2060  * 4 - Process the interrupt(s) that had bits set in the IIRs.
   2061  * 5 - Re-enable Master Interrupt Control.
   2062  */
   2063 static irqreturn_t ilk_irq_handler(DRM_IRQ_ARGS)
   2064 {
   2065 	struct drm_i915_private *dev_priv = arg;
   2066 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
   2067 	irqreturn_t ret = IRQ_NONE;
   2068 
   2069 	if (!intel_irqs_enabled(dev_priv))
   2070 		return IRQ_NONE;
   2071 
   2072 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
   2073 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   2074 
   2075 	/* disable master interrupt before clearing iir  */
   2076 	de_ier = I915_READ(DEIER);
   2077 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
   2078 
   2079 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
    2080 	 * interrupts will be stored on its back queue, and then we'll be
   2081 	 * able to process them after we restore SDEIER (as soon as we restore
   2082 	 * it, we'll get an interrupt if SDEIIR still has something to process
   2083 	 * due to its back queue). */
   2084 	if (!HAS_PCH_NOP(dev_priv)) {
   2085 		sde_ier = I915_READ(SDEIER);
   2086 		I915_WRITE(SDEIER, 0);
   2087 	}
   2088 
   2089 	/* Find, clear, then process each source of interrupt */
   2090 
   2091 	gt_iir = I915_READ(GTIIR);
   2092 	if (gt_iir) {
   2093 		I915_WRITE(GTIIR, gt_iir);
   2094 		ret = IRQ_HANDLED;
   2095 		if (INTEL_GEN(dev_priv) >= 6)
   2096 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
   2097 		else
   2098 			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
   2099 	}
   2100 
   2101 	de_iir = I915_READ(DEIIR);
   2102 	if (de_iir) {
   2103 		I915_WRITE(DEIIR, de_iir);
   2104 		ret = IRQ_HANDLED;
   2105 		if (INTEL_GEN(dev_priv) >= 7)
   2106 			ivb_display_irq_handler(dev_priv, de_iir);
   2107 		else
   2108 			ilk_display_irq_handler(dev_priv, de_iir);
   2109 	}
   2110 
   2111 	if (INTEL_GEN(dev_priv) >= 6) {
   2112 		u32 pm_iir = I915_READ(GEN6_PMIIR);
   2113 		if (pm_iir) {
   2114 			I915_WRITE(GEN6_PMIIR, pm_iir);
   2115 			ret = IRQ_HANDLED;
   2116 			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
   2117 		}
   2118 	}
   2119 
   2120 	I915_WRITE(DEIER, de_ier);
   2121 	if (!HAS_PCH_NOP(dev_priv))
   2122 		I915_WRITE(SDEIER, sde_ier);
   2123 
   2124 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
   2125 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   2126 
   2127 	return ret;
   2128 }
   2129 
   2130 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
   2131 				u32 hotplug_trigger,
   2132 				const u32 hpd[HPD_NUM_PINS])
   2133 {
   2134 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
   2135 
   2136 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
   2137 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
   2138 
   2139 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
   2140 			   dig_hotplug_reg, hpd,
   2141 			   bxt_port_hotplug_long_detect);
   2142 
   2143 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
   2144 }
   2145 
    2146 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
   2147 {
   2148 	u32 pin_mask = 0, long_mask = 0;
   2149 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
   2150 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
   2151 	long_pulse_detect_func long_pulse_detect;
   2152 	const u32 *hpd;
   2153 
   2154 	if (INTEL_GEN(dev_priv) >= 12) {
   2155 		long_pulse_detect = gen12_port_hotplug_long_detect;
   2156 		hpd = hpd_gen12;
   2157 	} else {
   2158 		long_pulse_detect = gen11_port_hotplug_long_detect;
   2159 		hpd = hpd_gen11;
   2160 	}
   2161 
   2162 	if (trigger_tc) {
   2163 		u32 dig_hotplug_reg;
   2164 
   2165 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
   2166 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
   2167 
   2168 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
   2169 				   dig_hotplug_reg, hpd, long_pulse_detect);
   2170 	}
   2171 
   2172 	if (trigger_tbt) {
   2173 		u32 dig_hotplug_reg;
   2174 
   2175 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
   2176 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
   2177 
   2178 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
   2179 				   dig_hotplug_reg, hpd, long_pulse_detect);
   2180 	}
   2181 
   2182 	if (pin_mask)
   2183 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
   2184 	else
   2185 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
   2186 }
   2187 
   2188 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
   2189 {
   2190 	u32 mask;
   2191 
   2192 	if (INTEL_GEN(dev_priv) >= 12)
   2193 		return TGL_DE_PORT_AUX_DDIA |
   2194 			TGL_DE_PORT_AUX_DDIB |
   2195 			TGL_DE_PORT_AUX_DDIC |
   2196 			TGL_DE_PORT_AUX_USBC1 |
   2197 			TGL_DE_PORT_AUX_USBC2 |
   2198 			TGL_DE_PORT_AUX_USBC3 |
   2199 			TGL_DE_PORT_AUX_USBC4 |
   2200 			TGL_DE_PORT_AUX_USBC5 |
   2201 			TGL_DE_PORT_AUX_USBC6;
   2202 
   2203 
   2204 	mask = GEN8_AUX_CHANNEL_A;
   2205 	if (INTEL_GEN(dev_priv) >= 9)
   2206 		mask |= GEN9_AUX_CHANNEL_B |
   2207 			GEN9_AUX_CHANNEL_C |
   2208 			GEN9_AUX_CHANNEL_D;
   2209 
   2210 	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
   2211 		mask |= CNL_AUX_CHANNEL_F;
   2212 
   2213 	if (IS_GEN(dev_priv, 11))
   2214 		mask |= ICL_AUX_CHANNEL_E;
   2215 
   2216 	return mask;
   2217 }
   2218 
   2219 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
   2220 {
   2221 	if (INTEL_GEN(dev_priv) >= 11)
   2222 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
   2223 	else if (INTEL_GEN(dev_priv) >= 9)
   2224 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
   2225 	else
   2226 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
   2227 }
   2228 
   2229 static void
   2230 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
   2231 {
   2232 	bool found = false;
   2233 
   2234 	if (iir & GEN8_DE_MISC_GSE) {
   2235 		intel_opregion_asle_intr(dev_priv);
   2236 		found = true;
   2237 	}
   2238 
   2239 	if (iir & GEN8_DE_EDP_PSR) {
   2240 		u32 psr_iir;
   2241 		i915_reg_t iir_reg;
   2242 
   2243 		if (INTEL_GEN(dev_priv) >= 12)
   2244 			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
   2245 		else
   2246 			iir_reg = EDP_PSR_IIR;
   2247 
   2248 		psr_iir = I915_READ(iir_reg);
   2249 		I915_WRITE(iir_reg, psr_iir);
   2250 
   2251 		if (psr_iir)
   2252 			found = true;
   2253 
   2254 		intel_psr_irq_handler(dev_priv, psr_iir);
   2255 	}
   2256 
   2257 	if (!found)
   2258 		DRM_ERROR("Unexpected DE Misc interrupt\n");
   2259 }
   2260 
   2261 static irqreturn_t
   2262 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
   2263 {
   2264 	irqreturn_t ret = IRQ_NONE;
   2265 	u32 iir;
   2266 	enum pipe pipe;
   2267 
   2268 	if (master_ctl & GEN8_DE_MISC_IRQ) {
   2269 		iir = I915_READ(GEN8_DE_MISC_IIR);
   2270 		if (iir) {
   2271 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
   2272 			ret = IRQ_HANDLED;
   2273 			gen8_de_misc_irq_handler(dev_priv, iir);
   2274 		} else {
   2275 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
   2276 		}
   2277 	}
   2278 
   2279 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
   2280 		iir = I915_READ(GEN11_DE_HPD_IIR);
   2281 		if (iir) {
   2282 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
   2283 			ret = IRQ_HANDLED;
   2284 			gen11_hpd_irq_handler(dev_priv, iir);
   2285 		} else {
    2286 			DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
   2287 		}
   2288 	}
   2289 
   2290 	if (master_ctl & GEN8_DE_PORT_IRQ) {
   2291 		iir = I915_READ(GEN8_DE_PORT_IIR);
   2292 		if (iir) {
   2293 			u32 tmp_mask;
   2294 			bool found = false;
   2295 
   2296 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
   2297 			ret = IRQ_HANDLED;
   2298 
   2299 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
   2300 				dp_aux_irq_handler(dev_priv);
   2301 				found = true;
   2302 			}
   2303 
   2304 			if (IS_GEN9_LP(dev_priv)) {
   2305 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
   2306 				if (tmp_mask) {
   2307 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
   2308 							    hpd_bxt);
   2309 					found = true;
   2310 				}
   2311 			} else if (IS_BROADWELL(dev_priv)) {
   2312 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
   2313 				if (tmp_mask) {
   2314 					ilk_hpd_irq_handler(dev_priv,
   2315 							    tmp_mask, hpd_bdw);
   2316 					found = true;
   2317 				}
   2318 			}
   2319 
   2320 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
   2321 				gmbus_irq_handler(dev_priv);
   2322 				found = true;
   2323 			}
   2324 
   2325 			if (!found)
   2326 				DRM_ERROR("Unexpected DE Port interrupt\n");
    2327 		} else {
    2328 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
    2329 		}
   2330 	}
   2331 
   2332 	for_each_pipe(dev_priv, pipe) {
   2333 		u32 fault_errors;
   2334 
   2335 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
   2336 			continue;
   2337 
   2338 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
   2339 		if (!iir) {
   2340 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
   2341 			continue;
   2342 		}
   2343 
   2344 		ret = IRQ_HANDLED;
   2345 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
   2346 
   2347 		if (iir & GEN8_PIPE_VBLANK)
   2348 			drm_handle_vblank(&dev_priv->drm, pipe);
   2349 
   2350 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
   2351 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
   2352 
   2353 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
   2354 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
   2355 
   2356 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
   2357 		if (fault_errors)
   2358 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
   2359 				  pipe_name(pipe),
   2360 				  fault_errors);
   2361 	}
   2362 
   2363 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
   2364 	    master_ctl & GEN8_DE_PCH_IRQ) {
   2365 		/*
   2366 		 * FIXME(BDW): Assume for now that the new interrupt handling
   2367 		 * scheme also closed the SDE interrupt handling race we've seen
   2368 		 * on older pch-split platforms. But this needs testing.
   2369 		 */
   2370 		iir = I915_READ(SDEIIR);
   2371 		if (iir) {
   2372 			I915_WRITE(SDEIIR, iir);
   2373 			ret = IRQ_HANDLED;
   2374 
   2375 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
   2376 				icp_irq_handler(dev_priv, iir);
   2377 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
   2378 				spt_irq_handler(dev_priv, iir);
   2379 			else
   2380 				cpt_irq_handler(dev_priv, iir);
   2381 		} else {
   2382 			/*
   2383 			 * Like on previous PCH there seems to be something
   2384 			 * fishy going on with forwarding PCH interrupts.
   2385 			 */
   2386 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
   2387 		}
   2388 	}
   2389 
   2390 	return ret;
   2391 }
   2392 
   2393 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
   2394 {
   2395 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
   2396 
   2397 	/*
   2398 	 * Now with master disabled, get a sample of level indications
   2399 	 * for this interrupt. Indications will be cleared on related acks.
   2400 	 * New indications can and will light up during processing,
    2401 	 * and will generate a new interrupt once the master is re-enabled.
   2402 	 */
   2403 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
   2404 }
   2405 
   2406 static inline void gen8_master_intr_enable(void __iomem * const regs)
   2407 {
   2408 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
   2409 }
   2410 
   2411 static irqreturn_t gen8_irq_handler(int irq, void *arg)
   2412 {
   2413 	struct drm_i915_private *dev_priv = arg;
   2414 	void __iomem * const regs = dev_priv->uncore.regs;
   2415 	u32 master_ctl;
   2416 	u32 gt_iir[4];
   2417 
   2418 	if (!intel_irqs_enabled(dev_priv))
   2419 		return IRQ_NONE;
   2420 
   2421 	master_ctl = gen8_master_intr_disable(regs);
   2422 	if (!master_ctl) {
   2423 		gen8_master_intr_enable(regs);
   2424 		return IRQ_NONE;
   2425 	}
   2426 
   2427 	/* Find, clear, then process each source of interrupt */
   2428 	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
   2429 
   2430 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
   2431 	if (master_ctl & ~GEN8_GT_IRQS) {
   2432 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   2433 		gen8_de_irq_handler(dev_priv, master_ctl);
   2434 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   2435 	}
   2436 
   2437 	gen8_master_intr_enable(regs);
   2438 
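         	/*
         	 * The GT IIRs were already acked above, so their events can be
         	 * processed with the master interrupt re-enabled.
         	 */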
   2439 	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
   2440 
   2441 	return IRQ_HANDLED;
   2442 }
   2443 
   2444 static u32
   2445 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
   2446 {
   2447 	void __iomem * const regs = gt->uncore->regs;
   2448 	u32 iir;
   2449 
   2450 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
   2451 		return 0;
   2452 
   2453 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
   2454 	if (likely(iir))
   2455 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
   2456 
   2457 	return iir;
   2458 }
   2459 
   2460 static void
   2461 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
   2462 {
   2463 	if (iir & GEN11_GU_MISC_GSE)
   2464 		intel_opregion_asle_intr(gt->i915);
   2465 }
   2466 
   2467 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
   2468 {
   2469 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
   2470 
   2471 	/*
   2472 	 * Now with master disabled, get a sample of level indications
   2473 	 * for this interrupt. Indications will be cleared on related acks.
   2474 	 * New indications can and will light up during processing,
    2475 	 * and will generate a new interrupt once the master is re-enabled.
   2476 	 */
   2477 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
   2478 }
   2479 
   2480 static inline void gen11_master_intr_enable(void __iomem * const regs)
   2481 {
   2482 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
   2483 }
   2484 
   2485 static void
   2486 gen11_display_irq_handler(struct drm_i915_private *i915)
   2487 {
   2488 	void __iomem * const regs = i915->uncore.regs;
   2489 	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
   2490 
   2491 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
   2492 	/*
    2493 	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
   2494 	 * for the display related bits.
   2495 	 */
   2496 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
   2497 	gen8_de_irq_handler(i915, disp_ctl);
   2498 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
   2499 		      GEN11_DISPLAY_IRQ_ENABLE);
   2500 
   2501 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
   2502 }
   2503 
   2504 static __always_inline irqreturn_t
   2505 __gen11_irq_handler(struct drm_i915_private * const i915,
   2506 		    u32 (*intr_disable)(void __iomem * const regs),
   2507 		    void (*intr_enable)(void __iomem * const regs))
   2508 {
   2509 	void __iomem * const regs = i915->uncore.regs;
   2510 	struct intel_gt *gt = &i915->gt;
   2511 	u32 master_ctl;
   2512 	u32 gu_misc_iir;
   2513 
   2514 	if (!intel_irqs_enabled(i915))
   2515 		return IRQ_NONE;
   2516 
   2517 	master_ctl = intr_disable(regs);
   2518 	if (!master_ctl) {
   2519 		intr_enable(regs);
   2520 		return IRQ_NONE;
   2521 	}
   2522 
   2523 	/* Find, clear, then process each source of interrupt. */
   2524 	gen11_gt_irq_handler(gt, master_ctl);
   2525 
   2526 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
   2527 	if (master_ctl & GEN11_DISPLAY_IRQ)
   2528 		gen11_display_irq_handler(i915);
   2529 
   2530 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
   2531 
   2532 	intr_enable(regs);
   2533 
   2534 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
   2535 
   2536 	return IRQ_HANDLED;
   2537 }
   2538 
   2539 static irqreturn_t gen11_irq_handler(int irq, void *arg)
   2540 {
   2541 	return __gen11_irq_handler(arg,
   2542 				   gen11_master_intr_disable,
   2543 				   gen11_master_intr_enable);
   2544 }
   2545 
   2546 /* Called from drm generic code, passed 'crtc' which
   2547  * we use as a pipe index
   2548  */
   2549 int i8xx_enable_vblank(struct drm_crtc *crtc)
   2550 {
   2551 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2552 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2553 	unsigned long irqflags;
   2554 
   2555 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2556 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
   2557 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2558 
   2559 	return 0;
   2560 }
   2561 
   2562 int i915gm_enable_vblank(struct drm_crtc *crtc)
   2563 {
   2564 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2565 
   2566 	/*
   2567 	 * Vblank interrupts fail to wake the device up from C2+.
   2568 	 * Disabling render clock gating during C-states avoids
   2569 	 * the problem. There is a small power cost so we do this
   2570 	 * only when vblank interrupts are actually enabled.
   2571 	 */
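         	/*
         	 * SCPD0 is a masked register: the high 16 bits of the value
         	 * select which of the low 16 bits the write updates, which is
         	 * what _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() encode.
         	 */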
   2572 	if (dev_priv->vblank_enabled++ == 0)
   2573 		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
   2574 
   2575 	return i8xx_enable_vblank(crtc);
   2576 }
   2577 
   2578 int i965_enable_vblank(struct drm_crtc *crtc)
   2579 {
   2580 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2581 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2582 	unsigned long irqflags;
   2583 
   2584 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2585 	i915_enable_pipestat(dev_priv, pipe,
   2586 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
   2587 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2588 
   2589 	return 0;
   2590 }
   2591 
   2592 int ilk_enable_vblank(struct drm_crtc *crtc)
   2593 {
   2594 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2595 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2596 	unsigned long irqflags;
   2597 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
   2598 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
   2599 
   2600 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2601 	ilk_enable_display_irq(dev_priv, bit);
   2602 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2603 
    2604 	/* Even though there is no DMC, the frame counter can get stuck when
   2605 	 * PSR is active as no frames are generated.
   2606 	 */
   2607 	if (HAS_PSR(dev_priv))
   2608 		drm_crtc_vblank_restore(crtc);
   2609 
   2610 	return 0;
   2611 }
   2612 
   2613 int bdw_enable_vblank(struct drm_crtc *crtc)
   2614 {
   2615 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2616 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2617 	unsigned long irqflags;
   2618 
   2619 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2620 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
   2621 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2622 
    2623 	/* Even if there is no DMC, the frame counter can get stuck when
   2624 	 * PSR is active as no frames are generated, so check only for PSR.
   2625 	 */
   2626 	if (HAS_PSR(dev_priv))
   2627 		drm_crtc_vblank_restore(crtc);
   2628 
   2629 	return 0;
   2630 }
   2631 
   2632 /* Called from drm generic code, passed 'crtc' which
   2633  * we use as a pipe index
   2634  */
   2635 void i8xx_disable_vblank(struct drm_crtc *crtc)
   2636 {
   2637 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2638 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2639 	unsigned long irqflags;
   2640 
   2641 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2642 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
   2643 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2644 }
   2645 
   2646 void i915gm_disable_vblank(struct drm_crtc *crtc)
   2647 {
   2648 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2649 
   2650 	i8xx_disable_vblank(crtc);
   2651 
   2652 	if (--dev_priv->vblank_enabled == 0)
   2653 		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
   2654 }
   2655 
   2656 void i965_disable_vblank(struct drm_crtc *crtc)
   2657 {
   2658 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2659 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2660 	unsigned long irqflags;
   2661 
   2662 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2663 	i915_disable_pipestat(dev_priv, pipe,
   2664 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
   2665 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2666 }
   2667 
   2668 void ilk_disable_vblank(struct drm_crtc *crtc)
   2669 {
   2670 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2671 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2672 	unsigned long irqflags;
   2673 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
   2674 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
   2675 
   2676 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2677 	ilk_disable_display_irq(dev_priv, bit);
   2678 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2679 }
   2680 
   2681 void bdw_disable_vblank(struct drm_crtc *crtc)
   2682 {
   2683 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   2684 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
   2685 	unsigned long irqflags;
   2686 
   2687 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
   2688 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
   2689 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
   2690 }
   2691 
   2692 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
   2693 {
   2694 	struct intel_uncore *uncore = &dev_priv->uncore;
   2695 
   2696 	if (HAS_PCH_NOP(dev_priv))
   2697 		return;
   2698 
   2699 	GEN3_IRQ_RESET(uncore, SDE);
   2700 
   2701 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
   2702 		I915_WRITE(SERR_INT, 0xffffffff);
   2703 }
   2704 
   2705 /*
   2706  * SDEIER is also touched by the interrupt handler to work around missed PCH
   2707  * interrupts. Hence we can't update it after the interrupt handler is enabled -
   2708  * instead we unconditionally enable all PCH interrupt sources here, but then
   2709  * only unmask them as needed with SDEIMR.
   2710  *
   2711  * This function needs to be called before interrupts are enabled.
   2712  */
   2713 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
   2714 {
   2715 	if (HAS_PCH_NOP(dev_priv))
   2716 		return;
   2717 
   2718 	WARN_ON(I915_READ(SDEIER) != 0);
   2719 	I915_WRITE(SDEIER, 0xffffffff);
   2720 	POSTING_READ(SDEIER);
   2721 }
   2722 
   2723 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
   2724 {
   2725 	struct intel_uncore *uncore = &dev_priv->uncore;
   2726 
   2727 	if (IS_CHERRYVIEW(dev_priv))
   2728 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
   2729 	else
   2730 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
   2731 
   2732 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
   2733 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   2734 
   2735 	i9xx_pipestat_irq_reset(dev_priv);
   2736 
   2737 	GEN3_IRQ_RESET(uncore, VLV_);
   2738 	dev_priv->irq_mask = ~0u;
   2739 }
   2740 
   2741 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
   2742 {
   2743 	struct intel_uncore *uncore = &dev_priv->uncore;
   2744 
   2745 	u32 pipestat_mask;
   2746 	u32 enable_mask;
   2747 	enum pipe pipe;
   2748 
   2749 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
   2750 
   2751 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
   2752 	for_each_pipe(dev_priv, pipe)
   2753 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
   2754 
   2755 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
   2756 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   2757 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   2758 		I915_LPE_PIPE_A_INTERRUPT |
   2759 		I915_LPE_PIPE_B_INTERRUPT;
   2760 
   2761 	if (IS_CHERRYVIEW(dev_priv))
   2762 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
   2763 			I915_LPE_PIPE_C_INTERRUPT;
   2764 
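         	/*
         	 * vlv_display_irq_reset() sets irq_mask to ~0u, so this asserts
         	 * that a display IRQ reset preceded the postinstall.
         	 */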
   2765 	WARN_ON(dev_priv->irq_mask != ~0u);
   2766 
   2767 	dev_priv->irq_mask = ~enable_mask;
   2768 
   2769 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
   2770 }
   2771 
    2772 /* drm_dma.h hooks */
    2773 
   2774 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
   2775 {
   2776 	struct intel_uncore *uncore = &dev_priv->uncore;
   2777 
   2778 	GEN3_IRQ_RESET(uncore, DE);
   2779 	if (IS_GEN(dev_priv, 7))
   2780 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
   2781 
   2782 	if (IS_HASWELL(dev_priv)) {
   2783 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
   2784 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
   2785 	}
   2786 
   2787 	gen5_gt_irq_reset(&dev_priv->gt);
   2788 
   2789 	ibx_irq_reset(dev_priv);
   2790 }
   2791 
   2792 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
   2793 {
   2794 	I915_WRITE(VLV_MASTER_IER, 0);
   2795 	POSTING_READ(VLV_MASTER_IER);
   2796 
   2797 	gen5_gt_irq_reset(&dev_priv->gt);
   2798 
   2799 	spin_lock_irq(&dev_priv->irq_lock);
   2800 	if (dev_priv->display_irqs_enabled)
   2801 		vlv_display_irq_reset(dev_priv);
   2802 	spin_unlock_irq(&dev_priv->irq_lock);
   2803 }
   2804 
   2805 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
   2806 {
   2807 	struct intel_uncore *uncore = &dev_priv->uncore;
   2808 	enum pipe pipe;
   2809 
   2810 	gen8_master_intr_disable(dev_priv->uncore.regs);
   2811 
   2812 	gen8_gt_irq_reset(&dev_priv->gt);
   2813 
   2814 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
   2815 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
   2816 
   2817 	for_each_pipe(dev_priv, pipe)
   2818 		if (intel_display_power_is_enabled(dev_priv,
   2819 						   POWER_DOMAIN_PIPE(pipe)))
   2820 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
   2821 
   2822 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
   2823 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
   2824 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
   2825 
   2826 	if (HAS_PCH_SPLIT(dev_priv))
   2827 		ibx_irq_reset(dev_priv);
   2828 }
   2829 
   2830 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
   2831 {
   2832 	struct intel_uncore *uncore = &dev_priv->uncore;
   2833 	enum pipe pipe;
   2834 
   2835 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
   2836 
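         	/*
         	 * Gen12+ has per-transcoder PSR IMR/IIR registers; earlier gens
         	 * have a single eDP PSR register pair.
         	 */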
   2837 	if (INTEL_GEN(dev_priv) >= 12) {
   2838 		enum transcoder trans;
   2839 
   2840 		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
   2841 			enum intel_display_power_domain domain;
   2842 
   2843 			domain = POWER_DOMAIN_TRANSCODER(trans);
   2844 			if (!intel_display_power_is_enabled(dev_priv, domain))
   2845 				continue;
   2846 
   2847 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
   2848 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
   2849 		}
   2850 	} else {
   2851 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
   2852 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
   2853 	}
   2854 
   2855 	for_each_pipe(dev_priv, pipe)
   2856 		if (intel_display_power_is_enabled(dev_priv,
   2857 						   POWER_DOMAIN_PIPE(pipe)))
   2858 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
   2859 
   2860 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
   2861 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
   2862 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
   2863 
   2864 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
   2865 		GEN3_IRQ_RESET(uncore, SDE);
   2866 }
   2867 
   2868 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
   2869 {
   2870 	struct intel_uncore *uncore = &dev_priv->uncore;
   2871 
   2872 	gen11_master_intr_disable(dev_priv->uncore.regs);
   2873 
   2874 	gen11_gt_irq_reset(&dev_priv->gt);
   2875 	gen11_display_irq_reset(dev_priv);
   2876 
   2877 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
   2878 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
   2879 }
   2880 
   2881 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
   2882 				     u8 pipe_mask)
   2883 {
   2884 	struct intel_uncore *uncore = &dev_priv->uncore;
   2885 
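         	/*
         	 * Keep vblank and FIFO underrun sources set in the IER even
         	 * while masked in the IMR, so they can later be enabled by
         	 * unmasking alone.
         	 */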
   2886 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
   2887 	enum pipe pipe;
   2888 
   2889 	spin_lock_irq(&dev_priv->irq_lock);
   2890 
   2891 	if (!intel_irqs_enabled(dev_priv)) {
   2892 		spin_unlock_irq(&dev_priv->irq_lock);
   2893 		return;
   2894 	}
   2895 
   2896 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
   2897 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
   2898 				  dev_priv->de_irq_mask[pipe],
   2899 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
   2900 
   2901 	spin_unlock_irq(&dev_priv->irq_lock);
   2902 }
   2903 
   2904 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
   2905 				     u8 pipe_mask)
   2906 {
   2907 	struct intel_uncore *uncore = &dev_priv->uncore;
   2908 	enum pipe pipe;
   2909 
   2910 	spin_lock_irq(&dev_priv->irq_lock);
   2911 
   2912 	if (!intel_irqs_enabled(dev_priv)) {
   2913 		spin_unlock_irq(&dev_priv->irq_lock);
   2914 		return;
   2915 	}
   2916 
   2917 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
   2918 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
   2919 
   2920 	spin_unlock_irq(&dev_priv->irq_lock);
   2921 
   2922 	/* make sure we're done processing display irqs */
   2923 	intel_synchronize_irq(dev_priv);
   2924 }
   2925 
   2926 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
   2927 {
   2928 	struct intel_uncore *uncore = &dev_priv->uncore;
   2929 
   2930 	I915_WRITE(GEN8_MASTER_IRQ, 0);
   2931 	POSTING_READ(GEN8_MASTER_IRQ);
   2932 
   2933 	gen8_gt_irq_reset(&dev_priv->gt);
   2934 
   2935 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
   2936 
   2937 	spin_lock_irq(&dev_priv->irq_lock);
   2938 	if (dev_priv->display_irqs_enabled)
   2939 		vlv_display_irq_reset(dev_priv);
   2940 	spin_unlock_irq(&dev_priv->irq_lock);
   2941 }
   2942 
   2943 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
   2944 				  const u32 hpd[HPD_NUM_PINS])
   2945 {
   2946 	struct intel_encoder *encoder;
   2947 	u32 enabled_irqs = 0;
   2948 
   2949 	for_each_intel_encoder(&dev_priv->drm, encoder)
   2950 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
   2951 			enabled_irqs |= hpd[encoder->hpd_pin];
   2952 
   2953 	return enabled_irqs;
   2954 }
   2955 
   2956 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
   2957 {
   2958 	u32 hotplug;
   2959 
   2960 	/*
   2961 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
   2962 	 * duration to 2ms (which is the minimum in the Display Port spec).
   2963 	 * The pulse duration bits are reserved on LPT+.
   2964 	 */
   2965 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
   2966 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
   2967 		     PORTC_PULSE_DURATION_MASK |
   2968 		     PORTD_PULSE_DURATION_MASK);
   2969 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
   2970 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
   2971 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
   2972 	/*
   2973 	 * When CPU and PCH are on the same package, port A
   2974 	 * HPD must be enabled in both north and south.
   2975 	 */
   2976 	if (HAS_PCH_LPT_LP(dev_priv))
   2977 		hotplug |= PORTA_HOTPLUG_ENABLE;
   2978 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
   2979 }
   2980 
   2981 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
   2982 {
   2983 	u32 hotplug_irqs, enabled_irqs;
   2984 
   2985 	if (HAS_PCH_IBX(dev_priv)) {
   2986 		hotplug_irqs = SDE_HOTPLUG_MASK;
   2987 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
   2988 	} else {
   2989 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
   2990 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
   2991 	}
   2992 
   2993 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
   2994 
   2995 	ibx_hpd_detection_setup(dev_priv);
   2996 }
   2997 
   2998 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
   2999 				    u32 ddi_hotplug_enable_mask,
   3000 				    u32 tc_hotplug_enable_mask)
   3001 {
   3002 	u32 hotplug;
   3003 
   3004 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
   3005 	hotplug |= ddi_hotplug_enable_mask;
   3006 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
   3007 
   3008 	if (tc_hotplug_enable_mask) {
   3009 		hotplug = I915_READ(SHOTPLUG_CTL_TC);
   3010 		hotplug |= tc_hotplug_enable_mask;
   3011 		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
   3012 	}
   3013 }
   3014 
   3015 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
   3016 			      u32 sde_ddi_mask, u32 sde_tc_mask,
   3017 			      u32 ddi_enable_mask, u32 tc_enable_mask,
   3018 			      const u32 *pins)
   3019 {
   3020 	u32 hotplug_irqs, enabled_irqs;
   3021 
   3022 	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
   3023 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
   3024 
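         	/*
         	 * Program the south HPD pin filter duration; going by the field
         	 * name, _500_ADJ presumably selects the adjusted 500us setting.
         	 */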
   3025 	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
   3026 
   3027 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
   3028 
   3029 	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
   3030 }
   3031 
   3032 /*
    3033  * EHL doesn't need most of gen11_hpd_irq_setup; it handles only the
    3034  * equivalent of SDE.
   3035  */
   3036 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3037 {
   3038 	icp_hpd_irq_setup(dev_priv,
   3039 			  SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
   3040 			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
   3041 			  hpd_icp);
   3042 }
   3043 
   3044 /*
   3045  * JSP behaves exactly the same as MCC above except that port C is mapped to
   3046  * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
   3047  * masks & tables rather than ICP's masks & tables.
   3048  */
   3049 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3050 {
   3051 	icp_hpd_irq_setup(dev_priv,
   3052 			  SDE_DDI_MASK_TGP, 0,
   3053 			  TGP_DDI_HPD_ENABLE_MASK, 0,
   3054 			  hpd_tgp);
   3055 }
   3056 
   3057 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
   3058 {
   3059 	u32 hotplug;
   3060 
   3061 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
   3062 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
   3063 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
   3064 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
   3065 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
   3066 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
   3067 
   3068 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
   3069 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
   3070 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
   3071 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
   3072 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
   3073 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
   3074 }
   3075 
   3076 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3077 {
   3078 	u32 hotplug_irqs, enabled_irqs;
   3079 	const u32 *hpd;
   3080 	u32 val;
   3081 
   3082 	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
   3083 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
   3084 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
   3085 
   3086 	val = I915_READ(GEN11_DE_HPD_IMR);
   3087 	val &= ~hotplug_irqs;
   3088 	I915_WRITE(GEN11_DE_HPD_IMR, val);
   3089 	POSTING_READ(GEN11_DE_HPD_IMR);
   3090 
   3091 	gen11_hpd_detection_setup(dev_priv);
   3092 
   3093 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
   3094 		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
   3095 				  TGP_DDI_HPD_ENABLE_MASK,
   3096 				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
   3097 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
   3098 		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
   3099 				  ICP_DDI_HPD_ENABLE_MASK,
   3100 				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
   3101 }
   3102 
   3103 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
   3104 {
   3105 	u32 val, hotplug;
   3106 
   3107 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
   3108 	if (HAS_PCH_CNP(dev_priv)) {
   3109 		val = I915_READ(SOUTH_CHICKEN1);
   3110 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
   3111 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
   3112 		I915_WRITE(SOUTH_CHICKEN1, val);
   3113 	}
   3114 
   3115 	/* Enable digital hotplug on the PCH */
   3116 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
   3117 	hotplug |= PORTA_HOTPLUG_ENABLE |
   3118 		   PORTB_HOTPLUG_ENABLE |
   3119 		   PORTC_HOTPLUG_ENABLE |
   3120 		   PORTD_HOTPLUG_ENABLE;
   3121 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
   3122 
   3123 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
   3124 	hotplug |= PORTE_HOTPLUG_ENABLE;
   3125 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
   3126 }
   3127 
   3128 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3129 {
   3130 	u32 hotplug_irqs, enabled_irqs;
   3131 
   3132 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
   3133 		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
   3134 
   3135 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
   3136 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
   3137 
   3138 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
   3139 
   3140 	spt_hpd_detection_setup(dev_priv);
   3141 }
   3142 
   3143 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
   3144 {
   3145 	u32 hotplug;
   3146 
   3147 	/*
   3148 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
   3149 	 * duration to 2ms (which is the minimum in the Display Port spec)
   3150 	 * The pulse duration bits are reserved on HSW+.
   3151 	 */
   3152 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
   3153 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
   3154 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
   3155 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
   3156 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
   3157 }
   3158 
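         /*
          * CPU-side DP A hotplug for ILK through BDW: the location of the
          * hotplug enable bit differs per generation, so pick the matching
          * mask and table, then fall through to the shared PCH hotplug setup.
          */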
   3159 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3160 {
   3161 	u32 hotplug_irqs, enabled_irqs;
   3162 
   3163 	if (INTEL_GEN(dev_priv) >= 8) {
   3164 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
   3165 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
   3166 
   3167 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3168 	} else if (INTEL_GEN(dev_priv) >= 7) {
   3169 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
   3170 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
   3171 
   3172 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3173 	} else {
   3174 		hotplug_irqs = DE_DP_A_HOTPLUG;
   3175 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
   3176 
   3177 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3178 	}
   3179 
   3180 	ilk_hpd_detection_setup(dev_priv);
   3181 
   3182 	ibx_hpd_irq_setup(dev_priv);
   3183 }
   3184 
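         /*
          * BXT: enable hotplug detection on DDI A-C and program the per-DDI
          * HPD invert bits from the VBT, since the required polarity depends
          * on the board's AOB design.
          */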
   3185 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
   3186 				      u32 enabled_irqs)
   3187 {
   3188 	u32 hotplug;
   3189 
   3190 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
   3191 	hotplug |= PORTA_HOTPLUG_ENABLE |
   3192 		   PORTB_HOTPLUG_ENABLE |
   3193 		   PORTC_HOTPLUG_ENABLE;
   3194 
   3195 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
   3196 		      hotplug, enabled_irqs);
   3197 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
   3198 
   3199 	/*
    3200 	 * For BXT the invert bit has to be set based on the AOB design
    3201 	 * for the HPD detection logic, so update it based on the VBT fields.
   3202 	 */
   3203 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
   3204 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
   3205 		hotplug |= BXT_DDIA_HPD_INVERT;
   3206 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
   3207 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
   3208 		hotplug |= BXT_DDIB_HPD_INVERT;
   3209 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
   3210 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
   3211 		hotplug |= BXT_DDIC_HPD_INVERT;
   3212 
   3213 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
   3214 }
   3215 
   3216 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
   3217 {
   3218 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
   3219 }
   3220 
   3221 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3222 {
   3223 	u32 hotplug_irqs, enabled_irqs;
   3224 
   3225 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
   3226 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
   3227 
   3228 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
   3229 
   3230 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
   3231 }
   3232 
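         /*
          * South (PCH) display interrupt postinstall: pick the GMBUS/AUX/
          * poison mask for this PCH generation, verify SDEIIR is clear,
          * unmask those bits in SDEIMR and arm hotplug detection.
          */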
   3233 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
   3234 {
   3235 	u32 mask;
   3236 
   3237 	if (HAS_PCH_NOP(dev_priv))
   3238 		return;
   3239 
   3240 	if (HAS_PCH_IBX(dev_priv))
   3241 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
   3242 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
   3243 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
   3244 	else
   3245 		mask = SDE_GMBUS_CPT;
   3246 
   3247 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
   3248 	I915_WRITE(SDEIMR, ~mask);
   3249 
   3250 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
   3251 	    HAS_PCH_LPT(dev_priv))
   3252 		ibx_hpd_detection_setup(dev_priv);
   3253 	else
   3254 		spt_hpd_detection_setup(dev_priv);
   3255 }
   3256 
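         /*
          * ILK-HSW display postinstall. Bits in display_mask are unmasked in
          * the IMR and enabled in the IER; bits in extra_mask (vblank, FIFO
          * underrun, DP A hotplug) are enabled in the IER only and stay
          * masked in the IMR until they are unmasked on demand.
          */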
   3257 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
   3258 {
   3259 	struct intel_uncore *uncore = &dev_priv->uncore;
   3260 	u32 display_mask, extra_mask;
   3261 
   3262 	if (INTEL_GEN(dev_priv) >= 7) {
   3263 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
   3264 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
   3265 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
   3266 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
   3267 			      DE_DP_A_HOTPLUG_IVB);
   3268 	} else {
   3269 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
   3270 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
   3271 				DE_PIPEA_CRC_DONE | DE_POISON);
   3272 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
   3273 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
   3274 			      DE_DP_A_HOTPLUG);
   3275 	}
   3276 
   3277 	if (IS_HASWELL(dev_priv)) {
   3278 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
   3279 		display_mask |= DE_EDP_PSR_INT_HSW;
   3280 	}
   3281 
   3282 	dev_priv->irq_mask = ~display_mask;
   3283 
   3284 	ibx_irq_pre_postinstall(dev_priv);
   3285 
   3286 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
   3287 		      display_mask | extra_mask);
   3288 
   3289 	gen5_gt_irq_postinstall(&dev_priv->gt);
   3290 
   3291 	ilk_hpd_detection_setup(dev_priv);
   3292 
   3293 	ibx_irq_postinstall(dev_priv);
   3294 
   3295 	if (IS_IRONLAKE_M(dev_priv)) {
   3296 		/* Enable PCU event interrupts
   3297 		 *
    3298 		 * Spinlocking is not required here for correctness since interrupt
    3299 		 * setup is guaranteed to run in a single-threaded context. But we
    3300 		 * need it to keep the assert_spin_locked check happy. */
   3301 		spin_lock_irq(&dev_priv->irq_lock);
   3302 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
   3303 		spin_unlock_irq(&dev_priv->irq_lock);
   3304 	}
   3305 }
   3306 
   3307 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
   3308 {
   3309 	lockdep_assert_held(&dev_priv->irq_lock);
   3310 
   3311 	if (dev_priv->display_irqs_enabled)
   3312 		return;
   3313 
   3314 	dev_priv->display_irqs_enabled = true;
   3315 
   3316 	if (intel_irqs_enabled(dev_priv)) {
   3317 		vlv_display_irq_reset(dev_priv);
   3318 		vlv_display_irq_postinstall(dev_priv);
   3319 	}
   3320 }
   3321 
   3322 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
   3323 {
   3324 	lockdep_assert_held(&dev_priv->irq_lock);
   3325 
   3326 	if (!dev_priv->display_irqs_enabled)
   3327 		return;
   3328 
   3329 	dev_priv->display_irqs_enabled = false;
   3330 
   3331 	if (intel_irqs_enabled(dev_priv))
   3332 		vlv_display_irq_reset(dev_priv);
   3333 }
   3334 
   3335 
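         /*
          * VLV: GT interrupts are always installed; the display interrupts
          * are only programmed here while the display power domain is up
          * (display_irqs_enabled), otherwise their setup is deferred to the
          * runtime pm code, per the note in intel_irq_init().
          */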
   3336 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
   3337 {
   3338 	gen5_gt_irq_postinstall(&dev_priv->gt);
   3339 
   3340 	spin_lock_irq(&dev_priv->irq_lock);
   3341 	if (dev_priv->display_irqs_enabled)
   3342 		vlv_display_irq_postinstall(dev_priv);
   3343 	spin_unlock_irq(&dev_priv->irq_lock);
   3344 
   3345 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
   3346 	POSTING_READ(VLV_MASTER_IER);
   3347 }
   3348 
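         /*
          * GEN8+ display engine postinstall: build the per-pipe, port, and
          * misc interrupt masks for this generation, then initialize only
          * those pipe IRQ registers whose power wells are currently enabled;
          * powered-down pipes are programmed when their wells come back on.
          */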
   3349 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
   3350 {
   3351 	struct intel_uncore *uncore = &dev_priv->uncore;
   3352 
   3353 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
   3354 	u32 de_pipe_enables;
   3355 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
   3356 	u32 de_port_enables;
   3357 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
   3358 	enum pipe pipe;
   3359 
   3360 	if (INTEL_GEN(dev_priv) <= 10)
   3361 		de_misc_masked |= GEN8_DE_MISC_GSE;
   3362 
   3363 	if (INTEL_GEN(dev_priv) >= 9) {
   3364 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
   3365 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
   3366 				  GEN9_AUX_CHANNEL_D;
   3367 		if (IS_GEN9_LP(dev_priv))
   3368 			de_port_masked |= BXT_DE_PORT_GMBUS;
   3369 	} else {
   3370 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
   3371 	}
   3372 
   3373 	if (INTEL_GEN(dev_priv) >= 11)
   3374 		de_port_masked |= ICL_AUX_CHANNEL_E;
   3375 
   3376 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
   3377 		de_port_masked |= CNL_AUX_CHANNEL_F;
   3378 
   3379 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
   3380 					   GEN8_PIPE_FIFO_UNDERRUN;
   3381 
   3382 	de_port_enables = de_port_masked;
   3383 	if (IS_GEN9_LP(dev_priv))
   3384 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
   3385 	else if (IS_BROADWELL(dev_priv))
   3386 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
   3387 
   3388 	if (INTEL_GEN(dev_priv) >= 12) {
   3389 		enum transcoder trans;
   3390 
   3391 		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
   3392 			enum intel_display_power_domain domain;
   3393 
   3394 			domain = POWER_DOMAIN_TRANSCODER(trans);
   3395 			if (!intel_display_power_is_enabled(dev_priv, domain))
   3396 				continue;
   3397 
   3398 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
   3399 		}
   3400 	} else {
   3401 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
   3402 	}
   3403 
   3404 	for_each_pipe(dev_priv, pipe) {
   3405 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
   3406 
   3407 		if (intel_display_power_is_enabled(dev_priv,
   3408 				POWER_DOMAIN_PIPE(pipe)))
   3409 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
   3410 					  dev_priv->de_irq_mask[pipe],
   3411 					  de_pipe_enables);
   3412 	}
   3413 
   3414 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
   3415 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
   3416 
   3417 	if (INTEL_GEN(dev_priv) >= 11) {
   3418 		u32 de_hpd_masked = 0;
   3419 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
   3420 				     GEN11_DE_TBT_HOTPLUG_MASK;
   3421 
   3422 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
   3423 			      de_hpd_enables);
   3424 		gen11_hpd_detection_setup(dev_priv);
   3425 	} else if (IS_GEN9_LP(dev_priv)) {
   3426 		bxt_hpd_detection_setup(dev_priv);
   3427 	} else if (IS_BROADWELL(dev_priv)) {
   3428 		ilk_hpd_detection_setup(dev_priv);
   3429 	}
   3430 }
   3431 
   3432 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
   3433 {
   3434 	if (HAS_PCH_SPLIT(dev_priv))
   3435 		ibx_irq_pre_postinstall(dev_priv);
   3436 
   3437 	gen8_gt_irq_postinstall(&dev_priv->gt);
   3438 	gen8_de_irq_postinstall(dev_priv);
   3439 
   3440 	if (HAS_PCH_SPLIT(dev_priv))
   3441 		ibx_irq_postinstall(dev_priv);
   3442 
   3443 	gen8_master_intr_enable(dev_priv->uncore.regs);
   3444 }
   3445 
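         /*
          * ICP+ PCH postinstall: enable everything in SDEIER, unmask GMBUS in
          * SDEIMR, and arm DDI/TC hotplug detection with the enable masks
          * matching the PCH variant (TGP, JSP, MCC, or plain ICP).
          */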
   3446 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
   3447 {
   3448 	u32 mask = SDE_GMBUS_ICP;
   3449 
   3450 	WARN_ON(I915_READ(SDEIER) != 0);
   3451 	I915_WRITE(SDEIER, 0xffffffff);
   3452 	POSTING_READ(SDEIER);
   3453 
   3454 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
   3455 	I915_WRITE(SDEIMR, ~mask);
   3456 
   3457 	if (HAS_PCH_TGP(dev_priv))
   3458 		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
   3459 					TGP_TC_HPD_ENABLE_MASK);
   3460 	else if (HAS_PCH_JSP(dev_priv))
   3461 		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
   3462 	else if (HAS_PCH_MCC(dev_priv))
   3463 		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
   3464 					ICP_TC_HPD_ENABLE(PORT_TC1));
   3465 	else
   3466 		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
   3467 					ICP_TC_HPD_ENABLE_MASK);
   3468 }
   3469 
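         /*
          * GEN11+ top-level postinstall: south (PCH), GT and display engine
          * interrupts first, then GU_MISC for the GSE event, and only then
          * the display and master interrupt enables, outermost last.
          */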
   3470 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
   3471 {
   3472 	struct intel_uncore *uncore = &dev_priv->uncore;
   3473 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
   3474 
   3475 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
   3476 		icp_irq_postinstall(dev_priv);
   3477 
   3478 	gen11_gt_irq_postinstall(&dev_priv->gt);
   3479 	gen8_de_irq_postinstall(dev_priv);
   3480 
   3481 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
   3482 
   3483 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
   3484 
   3485 	gen11_master_intr_enable(uncore->regs);
   3486 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
   3487 }
   3488 
   3489 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
   3490 {
   3491 	gen8_gt_irq_postinstall(&dev_priv->gt);
   3492 
   3493 	spin_lock_irq(&dev_priv->irq_lock);
   3494 	if (dev_priv->display_irqs_enabled)
   3495 		vlv_display_irq_postinstall(dev_priv);
   3496 	spin_unlock_irq(&dev_priv->irq_lock);
   3497 
   3498 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
   3499 	POSTING_READ(GEN8_MASTER_IRQ);
   3500 }
   3501 
   3502 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
   3503 {
   3504 	struct intel_uncore *uncore = &dev_priv->uncore;
   3505 
   3506 	i9xx_pipestat_irq_reset(dev_priv);
   3507 
   3508 	GEN2_IRQ_RESET(uncore);
   3509 }
   3510 
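         /*
          * GEN2: the interrupt registers are 16 bits wide, so the error mask,
          * IMR and IER are programmed through the 16-bit uncore accessors.
          */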
   3511 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
   3512 {
   3513 	struct intel_uncore *uncore = &dev_priv->uncore;
   3514 	u16 enable_mask;
   3515 
   3516 	intel_uncore_write16(uncore,
   3517 			     EMR,
   3518 			     ~(I915_ERROR_PAGE_TABLE |
   3519 			       I915_ERROR_MEMORY_REFRESH));
   3520 
   3521 	/* Unmask the interrupts that we always want on. */
   3522 	dev_priv->irq_mask =
   3523 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3524 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3525 		  I915_MASTER_ERROR_INTERRUPT);
   3526 
   3527 	enable_mask =
   3528 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3529 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3530 		I915_MASTER_ERROR_INTERRUPT |
   3531 		I915_USER_INTERRUPT;
   3532 
   3533 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
   3534 
    3535 	/* Interrupt setup is already guaranteed to be single-threaded; this is
    3536 	 * just to keep the assert_spin_locked check happy. */
   3537 	spin_lock_irq(&dev_priv->irq_lock);
   3538 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3539 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3540 	spin_unlock_irq(&dev_priv->irq_lock);
   3541 }
   3542 
   3543 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
   3544 			       u16 *eir, u16 *eir_stuck)
   3545 {
   3546 	struct intel_uncore *uncore = &i915->uncore;
   3547 	u16 emr;
   3548 
   3549 	*eir = intel_uncore_read16(uncore, EIR);
   3550 
   3551 	if (*eir)
   3552 		intel_uncore_write16(uncore, EIR, *eir);
   3553 
   3554 	*eir_stuck = intel_uncore_read16(uncore, EIR);
   3555 	if (*eir_stuck == 0)
   3556 		return;
   3557 
   3558 	/*
   3559 	 * Toggle all EMR bits to make sure we get an edge
   3560 	 * in the ISR master error bit if we don't clear
    3561 	 * all the EIR bits. Otherwise the edge-triggered
    3562 	 * IIR on i965/g4x wouldn't notice that an interrupt
    3563 	 * is still pending. Also some EIR bits can't be
    3564 	 * cleared except by handling the underlying error
    3565 	 * (or by a GPU reset), so we mask any bit that
   3566 	 * remains set.
   3567 	 */
   3568 	emr = intel_uncore_read16(uncore, EMR);
   3569 	intel_uncore_write16(uncore, EMR, 0xffff);
   3570 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
   3571 }
   3572 
   3573 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
   3574 				   u16 eir, u16 eir_stuck)
   3575 {
   3576 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
   3577 
   3578 	if (eir_stuck)
   3579 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
   3580 }
   3581 
   3582 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
   3583 			       u32 *eir, u32 *eir_stuck)
   3584 {
   3585 	u32 emr;
   3586 
   3587 	*eir = I915_READ(EIR);
   3588 
   3589 	I915_WRITE(EIR, *eir);
   3590 
   3591 	*eir_stuck = I915_READ(EIR);
   3592 	if (*eir_stuck == 0)
   3593 		return;
   3594 
   3595 	/*
   3596 	 * Toggle all EMR bits to make sure we get an edge
   3597 	 * in the ISR master error bit if we don't clear
    3598 	 * all the EIR bits. Otherwise the edge-triggered
    3599 	 * IIR on i965/g4x wouldn't notice that an interrupt
    3600 	 * is still pending. Also some EIR bits can't be
    3601 	 * cleared except by handling the underlying error
    3602 	 * (or by a GPU reset), so we mask any bit that
   3603 	 * remains set.
   3604 	 */
   3605 	emr = I915_READ(EMR);
   3606 	I915_WRITE(EMR, 0xffffffff);
   3607 	I915_WRITE(EMR, emr | *eir_stuck);
   3608 }
   3609 
   3610 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
   3611 				   u32 eir, u32 eir_stuck)
   3612 {
   3613 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
   3614 
   3615 	if (eir_stuck)
   3616 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
   3617 }
   3618 
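         /*
          * The GEN2-4 interrupt handlers share a two-phase shape: ack all
          * sources first (pipestat, error, hotplug status, then IIR itself)
          * and run the actual handlers afterwards, so that an event arriving
          * mid-handler re-asserts IIR instead of being lost.
          */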
   3619 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
   3620 {
   3621 	struct drm_i915_private *dev_priv = arg;
   3622 	irqreturn_t ret = IRQ_NONE;
   3623 
   3624 	if (!intel_irqs_enabled(dev_priv))
   3625 		return IRQ_NONE;
   3626 
    3627 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
   3628 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   3629 
   3630 	do {
   3631 		u32 pipe_stats[I915_MAX_PIPES] = {};
   3632 		u16 eir = 0, eir_stuck = 0;
   3633 		u16 iir;
   3634 
   3635 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
   3636 		if (iir == 0)
   3637 			break;
   3638 
   3639 		ret = IRQ_HANDLED;
   3640 
   3641 		/* Call regardless, as some status bits might not be
   3642 		 * signalled in iir */
   3643 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
   3644 
   3645 		if (iir & I915_MASTER_ERROR_INTERRUPT)
   3646 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
   3647 
   3648 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
   3649 
   3650 		if (iir & I915_USER_INTERRUPT)
   3651 			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
   3652 
   3653 		if (iir & I915_MASTER_ERROR_INTERRUPT)
   3654 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
   3655 
   3656 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
   3657 	} while (0);
   3658 
   3659 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   3660 
   3661 	return ret;
   3662 }
   3663 
   3664 static void i915_irq_reset(struct drm_i915_private *dev_priv)
   3665 {
   3666 	struct intel_uncore *uncore = &dev_priv->uncore;
   3667 
   3668 	if (I915_HAS_HOTPLUG(dev_priv)) {
   3669 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   3670 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   3671 	}
   3672 
   3673 	i9xx_pipestat_irq_reset(dev_priv);
   3674 
   3675 	GEN3_IRQ_RESET(uncore, GEN2_);
   3676 }
   3677 
   3678 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
   3679 {
   3680 	struct intel_uncore *uncore = &dev_priv->uncore;
   3681 	u32 enable_mask;
   3682 
   3683 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
   3684 			  I915_ERROR_MEMORY_REFRESH));
   3685 
   3686 	/* Unmask the interrupts that we always want on. */
   3687 	dev_priv->irq_mask =
   3688 		~(I915_ASLE_INTERRUPT |
   3689 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3690 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3691 		  I915_MASTER_ERROR_INTERRUPT);
   3692 
   3693 	enable_mask =
   3694 		I915_ASLE_INTERRUPT |
   3695 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3696 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3697 		I915_MASTER_ERROR_INTERRUPT |
   3698 		I915_USER_INTERRUPT;
   3699 
   3700 	if (I915_HAS_HOTPLUG(dev_priv)) {
   3701 		/* Enable in IER... */
   3702 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
   3703 		/* and unmask in IMR */
   3704 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
   3705 	}
   3706 
   3707 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
   3708 
    3709 	/* Interrupt setup is already guaranteed to be single-threaded; this is
    3710 	 * just to keep the assert_spin_locked check happy. */
   3711 	spin_lock_irq(&dev_priv->irq_lock);
   3712 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3713 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3714 	spin_unlock_irq(&dev_priv->irq_lock);
   3715 
   3716 	i915_enable_asle_pipestat(dev_priv);
   3717 }
   3718 
   3719 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
   3720 {
   3721 	struct drm_i915_private *dev_priv = arg;
   3722 	irqreturn_t ret = IRQ_NONE;
   3723 
   3724 	if (!intel_irqs_enabled(dev_priv))
   3725 		return IRQ_NONE;
   3726 
    3727 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
   3728 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   3729 
   3730 	do {
   3731 		u32 pipe_stats[I915_MAX_PIPES] = {};
   3732 		u32 eir = 0, eir_stuck = 0;
   3733 		u32 hotplug_status = 0;
   3734 		u32 iir;
   3735 
   3736 		iir = I915_READ(GEN2_IIR);
   3737 		if (iir == 0)
   3738 			break;
   3739 
   3740 		ret = IRQ_HANDLED;
   3741 
   3742 		if (I915_HAS_HOTPLUG(dev_priv) &&
   3743 		    iir & I915_DISPLAY_PORT_INTERRUPT)
   3744 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
   3745 
   3746 		/* Call regardless, as some status bits might not be
   3747 		 * signalled in iir */
   3748 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
   3749 
   3750 		if (iir & I915_MASTER_ERROR_INTERRUPT)
   3751 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
   3752 
   3753 		I915_WRITE(GEN2_IIR, iir);
   3754 
   3755 		if (iir & I915_USER_INTERRUPT)
   3756 			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
   3757 
   3758 		if (iir & I915_MASTER_ERROR_INTERRUPT)
   3759 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
   3760 
   3761 		if (hotplug_status)
   3762 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
   3763 
   3764 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
   3765 	} while (0);
   3766 
   3767 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   3768 
   3769 	return ret;
   3770 }
   3771 
   3772 static void i965_irq_reset(struct drm_i915_private *dev_priv)
   3773 {
   3774 	struct intel_uncore *uncore = &dev_priv->uncore;
   3775 
   3776 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
   3777 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
   3778 
   3779 	i9xx_pipestat_irq_reset(dev_priv);
   3780 
   3781 	GEN3_IRQ_RESET(uncore, GEN2_);
   3782 }
   3783 
   3784 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
   3785 {
   3786 	struct intel_uncore *uncore = &dev_priv->uncore;
   3787 	u32 enable_mask;
   3788 	u32 error_mask;
   3789 
   3790 	/*
    3791 	 * Enable some error detection; note that the instruction error mask
    3792 	 * bit is reserved, so we leave it masked.
   3793 	 */
   3794 	if (IS_G4X(dev_priv)) {
   3795 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
   3796 			       GM45_ERROR_MEM_PRIV |
   3797 			       GM45_ERROR_CP_PRIV |
   3798 			       I915_ERROR_MEMORY_REFRESH);
   3799 	} else {
   3800 		error_mask = ~(I915_ERROR_PAGE_TABLE |
   3801 			       I915_ERROR_MEMORY_REFRESH);
   3802 	}
   3803 	I915_WRITE(EMR, error_mask);
   3804 
   3805 	/* Unmask the interrupts that we always want on. */
   3806 	dev_priv->irq_mask =
   3807 		~(I915_ASLE_INTERRUPT |
   3808 		  I915_DISPLAY_PORT_INTERRUPT |
   3809 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3810 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3811 		  I915_MASTER_ERROR_INTERRUPT);
   3812 
   3813 	enable_mask =
   3814 		I915_ASLE_INTERRUPT |
   3815 		I915_DISPLAY_PORT_INTERRUPT |
   3816 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
   3817 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
   3818 		I915_MASTER_ERROR_INTERRUPT |
   3819 		I915_USER_INTERRUPT;
   3820 
   3821 	if (IS_G4X(dev_priv))
   3822 		enable_mask |= I915_BSD_USER_INTERRUPT;
   3823 
   3824 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
   3825 
    3826 	/* Interrupt setup is already guaranteed to be single-threaded; this is
    3827 	 * just to keep the assert_spin_locked check happy. */
   3828 	spin_lock_irq(&dev_priv->irq_lock);
   3829 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
   3830 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3831 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
   3832 	spin_unlock_irq(&dev_priv->irq_lock);
   3833 
   3834 	i915_enable_asle_pipestat(dev_priv);
   3835 }
   3836 
   3837 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
   3838 {
   3839 	u32 hotplug_en;
   3840 
   3841 	lockdep_assert_held(&dev_priv->irq_lock);
   3842 
    3843 	/* Note that HDMI and DP share hotplug bits. */
    3844 	/* The enable bits are the same for all generations. */
    3845 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
    3846 	/*
    3847 	 * Programming the CRT detection parameters tends to generate a
    3848 	 * spurious hotplug event about three seconds later, so just do it once.
    3849 	 */
   3850 	if (IS_G4X(dev_priv))
   3851 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
   3852 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
   3853 
   3854 	/* Ignore TV since it's buggy */
   3855 	i915_hotplug_interrupt_update_locked(dev_priv,
   3856 					     HOTPLUG_INT_EN_MASK |
   3857 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
   3858 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
   3859 					     hotplug_en);
   3860 }
   3861 
   3862 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
   3863 {
   3864 	struct drm_i915_private *dev_priv = arg;
   3865 	irqreturn_t ret = IRQ_NONE;
   3866 
   3867 	if (!intel_irqs_enabled(dev_priv))
   3868 		return IRQ_NONE;
   3869 
    3870 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
   3871 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   3872 
   3873 	do {
   3874 		u32 pipe_stats[I915_MAX_PIPES] = {};
   3875 		u32 eir = 0, eir_stuck = 0;
   3876 		u32 hotplug_status = 0;
   3877 		u32 iir;
   3878 
   3879 		iir = I915_READ(GEN2_IIR);
   3880 		if (iir == 0)
   3881 			break;
   3882 
   3883 		ret = IRQ_HANDLED;
   3884 
   3885 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
   3886 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
   3887 
   3888 		/* Call regardless, as some status bits might not be
   3889 		 * signalled in iir */
   3890 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
   3891 
   3892 		if (iir & I915_MASTER_ERROR_INTERRUPT)
   3893 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
   3894 
   3895 		I915_WRITE(GEN2_IIR, iir);
   3896 
   3897 		if (iir & I915_USER_INTERRUPT)
   3898 			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
   3899 
   3900 		if (iir & I915_BSD_USER_INTERRUPT)
   3901 			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
   3902 
   3903 		if (iir & I915_MASTER_ERROR_INTERRUPT)
   3904 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
   3905 
   3906 		if (hotplug_status)
   3907 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
   3908 
   3909 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
   3910 	} while (0);
   3911 
   3912 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   3913 
   3914 	return ret;
   3915 }
   3916 
   3917 /**
   3918  * intel_irq_init - initializes irq support
   3919  * @dev_priv: i915 device instance
   3920  *
    3921  * This function initializes all the irq support, including work items, timers
    3922  * and all the vtables. It does not set up the interrupt itself, though.
   3923  */
   3924 void intel_irq_init(struct drm_i915_private *dev_priv)
   3925 {
   3926 	struct drm_device *dev = &dev_priv->drm;
   3927 	int i;
   3928 
   3929 	intel_hpd_init_work(dev_priv);
   3930 
   3931 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
   3932 	for (i = 0; i < MAX_L3_SLICES; ++i)
   3933 		dev_priv->l3_parity.remap_info[i] = NULL;
   3934 
    3935 	/* Pre-gen11, the GuC irq bits are in the upper 16 bits of the PM reg. */
   3936 	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
   3937 		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
   3938 
   3939 	dev->vblank_disable_immediate = true;
   3940 
   3941 	/* Most platforms treat the display irq block as an always-on
   3942 	 * power domain. vlv/chv can disable it at runtime and need
   3943 	 * special care to avoid writing any of the display block registers
   3944 	 * outside of the power domain. We defer setting up the display irqs
    3945 	 * in this case to the runtime pm code.
   3946 	 */
   3947 	dev_priv->display_irqs_enabled = true;
   3948 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   3949 		dev_priv->display_irqs_enabled = false;
   3950 
   3951 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
   3952 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
   3953 	 * detection, as short HPD storms will occur as a natural part of
   3954 	 * sideband messaging with MST.
    3955 	 * On older platforms, however, IRQ storms can occur with both long and
   3956 	 * short pulses, as seen on some G4x systems.
   3957 	 */
   3958 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
   3959 
   3960 	if (HAS_GMCH(dev_priv)) {
   3961 		if (I915_HAS_HOTPLUG(dev_priv))
   3962 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
   3963 	} else {
   3964 		if (HAS_PCH_JSP(dev_priv))
   3965 			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
   3966 		else if (HAS_PCH_MCC(dev_priv))
   3967 			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
   3968 		else if (INTEL_GEN(dev_priv) >= 11)
   3969 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
   3970 		else if (IS_GEN9_LP(dev_priv))
   3971 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
   3972 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
   3973 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
   3974 		else
   3975 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
   3976 	}
   3977 }
   3978 
   3979 /**
   3980  * intel_irq_fini - deinitializes IRQ support
   3981  * @i915: i915 device instance
   3982  *
   3983  * This function deinitializes all the IRQ support.
   3984  */
   3985 void intel_irq_fini(struct drm_i915_private *i915)
   3986 {
   3987 	int i;
   3988 
   3989 	for (i = 0; i < MAX_L3_SLICES; ++i)
   3990 		kfree(i915->l3_parity.remap_info[i]);
   3991 }
   3992 
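         /*
          * Select the interrupt handler for this device: GMCH platforms are
          * distinguished by chip generation, PCH-split platforms by display
          * engine generation.
          */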
   3993 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
   3994 {
   3995 	if (HAS_GMCH(dev_priv)) {
   3996 		if (IS_CHERRYVIEW(dev_priv))
   3997 			return cherryview_irq_handler;
   3998 		else if (IS_VALLEYVIEW(dev_priv))
   3999 			return valleyview_irq_handler;
   4000 		else if (IS_GEN(dev_priv, 4))
   4001 			return i965_irq_handler;
   4002 		else if (IS_GEN(dev_priv, 3))
   4003 			return i915_irq_handler;
   4004 		else
   4005 			return i8xx_irq_handler;
   4006 	} else {
   4007 		if (INTEL_GEN(dev_priv) >= 11)
   4008 			return gen11_irq_handler;
   4009 		else if (INTEL_GEN(dev_priv) >= 8)
   4010 			return gen8_irq_handler;
   4011 		else
   4012 			return ilk_irq_handler;
   4013 	}
   4014 }
   4015 
   4016 static void intel_irq_reset(struct drm_i915_private *dev_priv)
   4017 {
   4018 	if (HAS_GMCH(dev_priv)) {
   4019 		if (IS_CHERRYVIEW(dev_priv))
   4020 			cherryview_irq_reset(dev_priv);
   4021 		else if (IS_VALLEYVIEW(dev_priv))
   4022 			valleyview_irq_reset(dev_priv);
   4023 		else if (IS_GEN(dev_priv, 4))
   4024 			i965_irq_reset(dev_priv);
   4025 		else if (IS_GEN(dev_priv, 3))
   4026 			i915_irq_reset(dev_priv);
   4027 		else
   4028 			i8xx_irq_reset(dev_priv);
   4029 	} else {
   4030 		if (INTEL_GEN(dev_priv) >= 11)
   4031 			gen11_irq_reset(dev_priv);
   4032 		else if (INTEL_GEN(dev_priv) >= 8)
   4033 			gen8_irq_reset(dev_priv);
   4034 		else
   4035 			ilk_irq_reset(dev_priv);
   4036 	}
   4037 }
   4038 
   4039 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
   4040 {
   4041 	if (HAS_GMCH(dev_priv)) {
   4042 		if (IS_CHERRYVIEW(dev_priv))
   4043 			cherryview_irq_postinstall(dev_priv);
   4044 		else if (IS_VALLEYVIEW(dev_priv))
   4045 			valleyview_irq_postinstall(dev_priv);
   4046 		else if (IS_GEN(dev_priv, 4))
   4047 			i965_irq_postinstall(dev_priv);
   4048 		else if (IS_GEN(dev_priv, 3))
   4049 			i915_irq_postinstall(dev_priv);
   4050 		else
   4051 			i8xx_irq_postinstall(dev_priv);
   4052 	} else {
   4053 		if (INTEL_GEN(dev_priv) >= 11)
   4054 			gen11_irq_postinstall(dev_priv);
   4055 		else if (INTEL_GEN(dev_priv) >= 8)
   4056 			gen8_irq_postinstall(dev_priv);
   4057 		else
   4058 			ilk_irq_postinstall(dev_priv);
   4059 	}
   4060 }
   4061 
   4062 /**
   4063  * intel_irq_install - enables the hardware interrupt
   4064  * @dev_priv: i915 device instance
   4065  *
    4066  * This function enables the hardware interrupt handling, but leaves hotplug
    4067  * handling disabled. It is called after intel_irq_init().
   4068  *
   4069  * In the driver load and resume code we need working interrupts in a few places
   4070  * but don't want to deal with the hassle of concurrent probe and hotplug
   4071  * workers. Hence the split into this two-stage approach.
   4072  */
   4073 int intel_irq_install(struct drm_i915_private *dev_priv)
   4074 {
   4075 	int irq = dev_priv->drm.pdev->irq;
   4076 	int ret;
   4077 
   4078 	/*
   4079 	 * We enable some interrupt sources in our postinstall hooks, so mark
   4080 	 * interrupts as enabled _before_ actually enabling them to avoid
   4081 	 * special cases in our ordering checks.
   4082 	 */
   4083 	dev_priv->runtime_pm.irqs_enabled = true;
   4084 
   4085 	dev_priv->drm.irq_enabled = true;
   4086 
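         	/*
         	 * Reset before requesting the (shared) IRQ line, so that a stray
         	 * interrupt arriving before postinstall finds everything masked.
         	 */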
   4087 	intel_irq_reset(dev_priv);
   4088 
   4089 	ret = request_irq(irq, intel_irq_handler(dev_priv),
   4090 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
   4091 	if (ret < 0) {
   4092 		dev_priv->drm.irq_enabled = false;
   4093 		return ret;
   4094 	}
   4095 
   4096 	intel_irq_postinstall(dev_priv);
   4097 
   4098 	return ret;
   4099 }
   4100 
   4101 /**
    4102  * intel_irq_uninstall - finalizes all irq handling
   4103  * @dev_priv: i915 device instance
   4104  *
   4105  * This stops interrupt and hotplug handling and unregisters and frees all
   4106  * resources acquired in the init functions.
   4107  */
   4108 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
   4109 {
   4110 	int irq = dev_priv->drm.pdev->irq;
   4111 
   4112 	/*
   4113 	 * FIXME we can get called twice during driver probe
   4114 	 * error handling as well as during driver remove due to
   4115 	 * intel_modeset_driver_remove() calling us out of sequence.
   4116 	 * Would be nice if it didn't do that...
   4117 	 */
   4118 	if (!dev_priv->drm.irq_enabled)
   4119 		return;
   4120 
   4121 	dev_priv->drm.irq_enabled = false;
   4122 
   4123 	intel_irq_reset(dev_priv);
   4124 
   4125 	free_irq(irq, dev_priv);
   4126 
   4127 	intel_hpd_cancel_work(dev_priv);
   4128 	dev_priv->runtime_pm.irqs_enabled = false;
   4129 }
   4130 
   4131 /**
   4132  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
   4133  * @dev_priv: i915 device instance
   4134  *
   4135  * This function is used to disable interrupts at runtime, both in the runtime
   4136  * pm and the system suspend/resume code.
   4137  */
   4138 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
   4139 {
   4140 	intel_irq_reset(dev_priv);
   4141 	dev_priv->runtime_pm.irqs_enabled = false;
   4142 	intel_synchronize_irq(dev_priv);
   4143 }
   4144 
   4145 /**
   4146  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
   4147  * @dev_priv: i915 device instance
   4148  *
   4149  * This function is used to enable interrupts at runtime, both in the runtime
   4150  * pm and the system suspend/resume code.
   4151  */
   4152 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
   4153 {
   4154 	dev_priv->runtime_pm.irqs_enabled = true;
   4155 	intel_irq_reset(dev_priv);
   4156 	intel_irq_postinstall(dev_priv);
   4157 }
   4158 
   4159 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
   4160 {
   4161 	/*
   4162 	 * We only use drm_irq_uninstall() at unload and VT switch, so
   4163 	 * this is the only thing we need to check.
   4164 	 */
   4165 	return dev_priv->runtime_pm.irqs_enabled;
   4166 }
   4167 
   4168 void intel_synchronize_irq(struct drm_i915_private *i915)
   4169 {
   4170 	synchronize_irq(i915->drm.pdev->irq);
   4171 }
   4172