1 /* $NetBSD: i915_irq.c,v 1.12 2018/08/27 07:03:25 riastradh Exp $ */
2
3 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 */
5 /*
6 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sub license, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial portions
19 * of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
25 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.12 2018/08/27 07:03:25 riastradh Exp $");
33
34 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36 #ifdef __NetBSD__
37 #include <sys/cdefs.h>
38 #endif
39
40 #include <linux/printk.h>
41 #include <linux/sysrq.h>
42 #include <linux/slab.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/circ_buf.h>
45 #endif
46 #include <drm/drmP.h>
47 #include <drm/i915_drm.h>
48 #include "i915_drv.h"
49 #include "i915_trace.h"
50 #include "intel_drv.h"
51
52 /**
53 * DOC: interrupt handling
54 *
55 * These functions provide the basic support for enabling and disabling the
56 * interrupt handling support. There's a lot more functionality in i915_irq.c
57 * and related files, but that will be described in separate chapters.
58 */
59
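/*
 * Per-platform hotplug interrupt enable/status bits, indexed by HPD pin.
 */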
60 static const u32 hpd_ilk[HPD_NUM_PINS] = {
61 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
62 };
63
64 static const u32 hpd_ivb[HPD_NUM_PINS] = {
65 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
66 };
67
68 static const u32 hpd_bdw[HPD_NUM_PINS] = {
69 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
70 };
71
72 static const u32 hpd_ibx[HPD_NUM_PINS] = {
73 [HPD_CRT] = SDE_CRT_HOTPLUG,
74 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
75 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
76 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
77 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
78 };
79
80 static const u32 hpd_cpt[HPD_NUM_PINS] = {
81 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
82 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
83 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
84 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
85 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
86 };
87
88 static const u32 hpd_spt[HPD_NUM_PINS] = {
89 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
90 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
91 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
92 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
93 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
94 };
95
96 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
97 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
98 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
99 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
100 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
101 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
102 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
103 };
104
105 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
106 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
107 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
108 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
109 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
110 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
111 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
112 };
113
114 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
115 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
116 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
117 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
118 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
119 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
120 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
121 };
122
123 /* BXT hpd list */
124 static const u32 hpd_bxt[HPD_NUM_PINS] = {
125 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
126 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
127 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
128 };
129
130 /* IIR can theoretically queue up two events. Be paranoid. */
131 #define GEN8_IRQ_RESET_NDX(type, which) do { \
132 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
133 POSTING_READ(GEN8_##type##_IMR(which)); \
134 I915_WRITE(GEN8_##type##_IER(which), 0); \
135 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
136 POSTING_READ(GEN8_##type##_IIR(which)); \
137 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
138 POSTING_READ(GEN8_##type##_IIR(which)); \
139 } while (0)
140
141 #define GEN5_IRQ_RESET(type) do { \
142 I915_WRITE(type##IMR, 0xffffffff); \
143 POSTING_READ(type##IMR); \
144 I915_WRITE(type##IER, 0); \
145 I915_WRITE(type##IIR, 0xffffffff); \
146 POSTING_READ(type##IIR); \
147 I915_WRITE(type##IIR, 0xffffffff); \
148 POSTING_READ(type##IIR); \
149 } while (0)
150
151 /*
152 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
153 */
154 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
155 {
156 u32 val = I915_READ(reg);
157
158 if (val == 0)
159 return;
160
161 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
162 reg, val);
163 I915_WRITE(reg, 0xffffffff);
164 POSTING_READ(reg);
165 I915_WRITE(reg, 0xffffffff);
166 POSTING_READ(reg);
167 }
168
169 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
170 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
171 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
172 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
173 POSTING_READ(GEN8_##type##_IMR(which)); \
174 } while (0)
175
176 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
177 gen5_assert_iir_is_zero(dev_priv, type##IIR); \
178 I915_WRITE(type##IER, (ier_val)); \
179 I915_WRITE(type##IMR, (imr_val)); \
180 POSTING_READ(type##IMR); \
181 } while (0)
182
183 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
184
185 /* For display hotplug interrupt */
186 static inline void
187 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
188 uint32_t mask,
189 uint32_t bits)
190 {
191 uint32_t val;
192
193 assert_spin_locked(&dev_priv->irq_lock);
194 WARN_ON(bits & ~mask);
195
196 val = I915_READ(PORT_HOTPLUG_EN);
197 val &= ~mask;
198 val |= bits;
199 I915_WRITE(PORT_HOTPLUG_EN, val);
200 }
201
202 /**
203 * i915_hotplug_interrupt_update - update hotplug interrupt enable
204 * @dev_priv: driver private
205 * @mask: bits to update
206 * @bits: bits to enable
207 * NOTE: the HPD enable bits are modified both inside and outside
208 * of an interrupt context. To prevent read-modify-write cycles from
209 * interfering, these bits are protected by a spinlock. Since this
210 * function is usually not called from a context where the lock is
211 * held already, this function acquires the lock itself. A non-locking
212 * version is also available.
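 *
 * For example (illustrative), passing the same value for @mask and @bits,
 * e.g. CRT_HOTPLUG_INT_EN, enables that hotplug interrupt; passing @bits
 * of 0 with the same @mask disables it.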
213 */
214 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
215 uint32_t mask,
216 uint32_t bits)
217 {
218 spin_lock_irq(&dev_priv->irq_lock);
219 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
220 spin_unlock_irq(&dev_priv->irq_lock);
221 }
222
223 /**
224 * ilk_update_display_irq - update DEIMR
225 * @dev_priv: driver private
226 * @interrupt_mask: mask of interrupt bits to update
227 * @enabled_irq_mask: mask of interrupt bits to enable
228 */
229 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
230 uint32_t interrupt_mask,
231 uint32_t enabled_irq_mask)
232 {
233 uint32_t new_val;
234
235 assert_spin_locked(&dev_priv->irq_lock);
236
237 WARN_ON(enabled_irq_mask & ~interrupt_mask);
238
239 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
240 return;
241
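/*
 * DEIMR is a mask register: a set bit masks (disables) the corresponding
 * interrupt, so bits to be enabled are cleared and the disabled bits
 * within interrupt_mask are set.
 */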
242 new_val = dev_priv->irq_mask;
243 new_val &= ~interrupt_mask;
244 new_val |= (~enabled_irq_mask & interrupt_mask);
245
246 if (new_val != dev_priv->irq_mask) {
247 dev_priv->irq_mask = new_val;
248 I915_WRITE(DEIMR, dev_priv->irq_mask);
249 POSTING_READ(DEIMR);
250 }
251 }
252
253 void
254 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
255 {
256 ilk_update_display_irq(dev_priv, mask, mask);
257 }
258
259 void
260 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
261 {
262 ilk_update_display_irq(dev_priv, mask, 0);
263 }
264
265 /**
266 * ilk_update_gt_irq - update GTIMR
267 * @dev_priv: driver private
268 * @interrupt_mask: mask of interrupt bits to update
269 * @enabled_irq_mask: mask of interrupt bits to enable
270 */
271 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
272 uint32_t interrupt_mask,
273 uint32_t enabled_irq_mask)
274 {
275 assert_spin_locked(&dev_priv->irq_lock);
276
277 WARN_ON(enabled_irq_mask & ~interrupt_mask);
278
279 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
280 return;
281
282 dev_priv->gt_irq_mask &= ~interrupt_mask;
283 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
284 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
285 POSTING_READ(GTIMR);
286 }
287
288 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
289 {
290 ilk_update_gt_irq(dev_priv, mask, mask);
291 }
292
293 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
294 {
295 ilk_update_gt_irq(dev_priv, mask, 0);
296 }
297
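/*
 * On gen8+ the PM (RPS) interrupt registers live in GT interrupt bank 2;
 * earlier gens use the dedicated GEN6_PM* registers. These helpers return
 * the right register for the hardware at hand.
 */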
298 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
299 {
300 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
301 }
302
303 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
304 {
305 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
306 }
307
308 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
309 {
310 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
311 }
312
313 /**
314 * snb_update_pm_irq - update GEN6_PMIMR
315 * @dev_priv: driver private
316 * @interrupt_mask: mask of interrupt bits to update
317 * @enabled_irq_mask: mask of interrupt bits to enable
318 */
319 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
320 uint32_t interrupt_mask,
321 uint32_t enabled_irq_mask)
322 {
323 uint32_t new_val;
324
325 WARN_ON(enabled_irq_mask & ~interrupt_mask);
326
327 assert_spin_locked(&dev_priv->irq_lock);
328
329 new_val = dev_priv->pm_irq_mask;
330 new_val &= ~interrupt_mask;
331 new_val |= (~enabled_irq_mask & interrupt_mask);
332
333 if (new_val != dev_priv->pm_irq_mask) {
334 dev_priv->pm_irq_mask = new_val;
335 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
336 POSTING_READ(gen6_pm_imr(dev_priv));
337 }
338 }
339
340 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
341 {
342 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
343 return;
344
345 snb_update_pm_irq(dev_priv, mask, mask);
346 }
347
348 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
349 uint32_t mask)
350 {
351 snb_update_pm_irq(dev_priv, mask, 0);
352 }
353
354 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
355 {
356 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
357 return;
358
359 __gen6_disable_pm_irq(dev_priv, mask);
360 }
361
362 void gen6_reset_rps_interrupts(struct drm_device *dev)
363 {
364 struct drm_i915_private *dev_priv = dev->dev_private;
365 uint32_t reg = gen6_pm_iir(dev_priv);
366
367 spin_lock_irq(&dev_priv->irq_lock);
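/* IIR can queue up to two identical events, so clear it twice. */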
368 I915_WRITE(reg, dev_priv->pm_rps_events);
369 I915_WRITE(reg, dev_priv->pm_rps_events);
370 POSTING_READ(reg);
371 dev_priv->rps.pm_iir = 0;
372 spin_unlock_irq(&dev_priv->irq_lock);
373 }
374
375 void gen6_enable_rps_interrupts(struct drm_device *dev)
376 {
377 struct drm_i915_private *dev_priv = dev->dev_private;
378
379 spin_lock_irq(&dev_priv->irq_lock);
380
381 WARN_ON(dev_priv->rps.pm_iir);
382 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
383 dev_priv->rps.interrupts_enabled = true;
384 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
385 dev_priv->pm_rps_events);
386 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
387
388 spin_unlock_irq(&dev_priv->irq_lock);
389 }
390
391 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
392 {
393 /*
394 * SNB and IVB can, while VLV and CHV may, hard hang on a looping batchbuffer
395 * if GEN6_PM_UP_EI_EXPIRED is masked.
396 *
397 * TODO: verify if this can be reproduced on VLV,CHV.
398 */
399 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
400 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
401
402 if (INTEL_INFO(dev_priv)->gen >= 8)
403 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
404
405 return mask;
406 }
407
408 void gen6_disable_rps_interrupts(struct drm_device *dev)
409 {
410 struct drm_i915_private *dev_priv = dev->dev_private;
411
412 spin_lock_irq(&dev_priv->irq_lock);
413 dev_priv->rps.interrupts_enabled = false;
414 spin_unlock_irq(&dev_priv->irq_lock);
415
416 cancel_work_sync(&dev_priv->rps.work);
417
418 spin_lock_irq(&dev_priv->irq_lock);
419
420 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
421
422 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
423 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
424 ~dev_priv->pm_rps_events);
425
426 spin_unlock_irq(&dev_priv->irq_lock);
427
428 synchronize_irq(dev->irq);
429 }
430
431 /**
432 * bdw_update_port_irq - update DE port interrupt
433 * @dev_priv: driver private
434 * @interrupt_mask: mask of interrupt bits to update
435 * @enabled_irq_mask: mask of interrupt bits to enable
436 */
437 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
438 uint32_t interrupt_mask,
439 uint32_t enabled_irq_mask)
440 {
441 uint32_t new_val;
442 uint32_t old_val;
443
444 assert_spin_locked(&dev_priv->irq_lock);
445
446 WARN_ON(enabled_irq_mask & ~interrupt_mask);
447
448 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
449 return;
450
451 old_val = I915_READ(GEN8_DE_PORT_IMR);
452
453 new_val = old_val;
454 new_val &= ~interrupt_mask;
455 new_val |= (~enabled_irq_mask & interrupt_mask);
456
457 if (new_val != old_val) {
458 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
459 POSTING_READ(GEN8_DE_PORT_IMR);
460 }
461 }
462
463 /**
464 * ibx_display_interrupt_update - update SDEIMR
465 * @dev_priv: driver private
466 * @interrupt_mask: mask of interrupt bits to update
467 * @enabled_irq_mask: mask of interrupt bits to enable
468 */
469 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
470 uint32_t interrupt_mask,
471 uint32_t enabled_irq_mask)
472 {
473 uint32_t sdeimr = I915_READ(SDEIMR);
474 sdeimr &= ~interrupt_mask;
475 sdeimr |= (~enabled_irq_mask & interrupt_mask);
476
477 WARN_ON(enabled_irq_mask & ~interrupt_mask);
478
479 assert_spin_locked(&dev_priv->irq_lock);
480
481 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
482 return;
483
484 I915_WRITE(SDEIMR, sdeimr);
485 POSTING_READ(SDEIMR);
486 }
487
488 static void
489 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
490 u32 enable_mask, u32 status_mask)
491 {
492 u32 reg = PIPESTAT(pipe);
493 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
494
495 assert_spin_locked(&dev_priv->irq_lock);
496 WARN_ON(!intel_irqs_enabled(dev_priv));
497
498 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
499 status_mask & ~PIPESTAT_INT_STATUS_MASK,
500 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
501 pipe_name(pipe), enable_mask, status_mask))
502 return;
503
504 if ((pipestat & enable_mask) == enable_mask)
505 return;
506
507 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
508
509 /* Enable the interrupt, clear any pending status */
510 pipestat |= enable_mask | status_mask;
511 I915_WRITE(reg, pipestat);
512 POSTING_READ(reg);
513 }
514
515 static void
516 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
517 u32 enable_mask, u32 status_mask)
518 {
519 u32 reg = PIPESTAT(pipe);
520 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
521
522 assert_spin_locked(&dev_priv->irq_lock);
523 WARN_ON(!intel_irqs_enabled(dev_priv));
524
525 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
526 status_mask & ~PIPESTAT_INT_STATUS_MASK,
527 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
528 pipe_name(pipe), enable_mask, status_mask))
529 return;
530
531 if ((pipestat & enable_mask) == 0)
532 return;
533
534 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
535
536 pipestat &= ~enable_mask;
537 I915_WRITE(reg, pipestat);
538 POSTING_READ(reg);
539 }
540
541 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
542 {
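/* The PIPESTAT enable bits sit 16 bits above their status bits. */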
543 u32 enable_mask = status_mask << 16;
544
545 /*
546 * On pipe A we don't support the PSR interrupt yet,
547 * on pipe B and C the same bit MBZ.
548 */
549 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
550 return 0;
551 /*
552 * On pipe B and C we don't support the PSR interrupt yet, on pipe
553 * A the same bit is for perf counters which we don't use either.
554 */
555 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
556 return 0;
557
558 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
559 SPRITE0_FLIP_DONE_INT_EN_VLV |
560 SPRITE1_FLIP_DONE_INT_EN_VLV);
561 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
562 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
563 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
564 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
565
566 return enable_mask;
567 }
568
569 void
570 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
571 u32 status_mask)
572 {
573 u32 enable_mask;
574
575 if (IS_VALLEYVIEW(dev_priv->dev))
576 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
577 status_mask);
578 else
579 enable_mask = status_mask << 16;
580 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
581 }
582
583 void
584 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
585 u32 status_mask)
586 {
587 u32 enable_mask;
588
589 if (IS_VALLEYVIEW(dev_priv->dev))
590 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
591 status_mask);
592 else
593 enable_mask = status_mask << 16;
594 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
595 }
596
597 /**
598 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
599 * @dev: drm device
600 */
601 static void i915_enable_asle_pipestat(struct drm_device *dev)
602 {
603 struct drm_i915_private *dev_priv = dev->dev_private;
604
605 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
606 return;
607
608 spin_lock_irq(&dev_priv->irq_lock);
609
610 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
611 if (INTEL_INFO(dev)->gen >= 4)
612 i915_enable_pipestat(dev_priv, PIPE_A,
613 PIPE_LEGACY_BLC_EVENT_STATUS);
614
615 spin_unlock_irq(&dev_priv->irq_lock);
616 }
617
618 /*
619 * This timing diagram depicts the video signal in and
620 * around the vertical blanking period.
621 *
622 * Assumptions about the fictitious mode used in this example:
623 * vblank_start >= 3
624 * vsync_start = vblank_start + 1
625 * vsync_end = vblank_start + 2
626 * vtotal = vblank_start + 3
627 *
628 * start of vblank:
629 * latch double buffered registers
630 * increment frame counter (ctg+)
631 * generate start of vblank interrupt (gen4+)
632 * |
633 * | frame start:
634 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
635 * | may be shifted forward 1-3 extra lines via PIPECONF
636 * | |
637 * | | start of vsync:
638 * | | generate vsync interrupt
639 * | | |
640 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
641 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
642 * ----va---> <-----------------vb--------------------> <--------va-------------
643 * | | <----vs-----> |
644 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
645 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
646 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
647 * | | |
648 * last visible pixel first visible pixel
649 * | increment frame counter (gen3/4)
650 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
651 *
652 * x = horizontal active
653 * _ = horizontal blanking
654 * hs = horizontal sync
655 * va = vertical active
656 * vb = vertical blanking
657 * vs = vertical sync
658 * vbs = vblank_start (number)
659 *
660 * Summary:
661 * - most events happen at the start of horizontal sync
662 * - frame start happens at the start of horizontal blank, 1-4 lines
663 * (depending on PIPECONF settings) after the start of vblank
664 * - gen3/4 pixel and frame counter are synchronized with the start
665 * of horizontal active on the first line of vertical active
666 */
667
668 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
669 {
670 /* Gen2 doesn't have a hardware frame counter */
671 return 0;
672 }
673
674 /* Called from drm generic code, passed a 'crtc', which
675 * we use as a pipe index
676 */
677 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
678 {
679 struct drm_i915_private *dev_priv = dev->dev_private;
680 unsigned long high_frame;
681 unsigned long low_frame;
682 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
683 struct intel_crtc *intel_crtc =
684 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
685 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
686
687 htotal = mode->crtc_htotal;
688 hsync_start = mode->crtc_hsync_start;
689 vbl_start = mode->crtc_vblank_start;
690 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
691 vbl_start = DIV_ROUND_UP(vbl_start, 2);
692
693 /* Convert to pixel count */
694 vbl_start *= htotal;
695
696 /* Start of vblank event occurs at start of hsync */
697 vbl_start -= htotal - hsync_start;
698
699 high_frame = PIPEFRAME(pipe);
700 low_frame = PIPEFRAMEPIXEL(pipe);
701
702 /*
703 * High & low register fields aren't synchronized, so make sure
704 * we get a low value that's stable across two reads of the high
705 * register.
706 */
707 do {
708 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
709 low = I915_READ(low_frame);
710 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
711 } while (high1 != high2);
712
713 high1 >>= PIPE_FRAME_HIGH_SHIFT;
714 pixel = low & PIPE_PIXEL_MASK;
715 low >>= PIPE_FRAME_LOW_SHIFT;
716
717 /*
718 * The frame counter increments at beginning of active.
719 * Cook up a vblank counter by also checking the pixel
720 * counter against vblank start.
721 */
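/*
 * high1 supplies frame counter bits 23:8 and low supplies bits 7:0; add one
 * once the pixel counter has already passed vblank start.
 */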
722 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
723 }
724
725 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
726 {
727 struct drm_i915_private *dev_priv = dev->dev_private;
728
729 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
730 }
731
732 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
733 #ifdef __NetBSD__
734 #define __raw_i915_read32(dev_priv, reg) bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg))
735 #else
736 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
737 #endif
738
739 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
740 {
741 struct drm_device *dev = crtc->base.dev;
742 struct drm_i915_private *dev_priv = dev->dev_private;
743 const struct drm_display_mode *mode = &crtc->base.hwmode;
744 enum i915_pipe pipe = crtc->pipe;
745 int position, vtotal;
746
747 vtotal = mode->crtc_vtotal;
748 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
749 vtotal /= 2;
750
751 if (IS_GEN2(dev))
752 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
753 else
754 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
755
756 /*
757 * On HSW, the DSL reg (0x70000) appears to return 0 if we
758 * read it just before the start of vblank. So try it again
759 * so we don't accidentally end up spanning a vblank frame
760 * increment, causing the pipe_update_end() code to squawk at us.
761 *
762 * The nature of this problem means we can't simply check the ISR
763 * bit and return the vblank start value; nor can we use the scanline
764 * debug register in the transcoder as it appears to have the same
765 * problem. We may need to extend this to include other platforms,
766 * but so far testing only shows the problem on HSW.
767 */
768 if (HAS_DDI(dev) && !position) {
769 int i, temp;
770
771 for (i = 0; i < 100; i++) {
772 udelay(1);
773 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
774 DSL_LINEMASK_GEN3;
775 if (temp != position) {
776 position = temp;
777 break;
778 }
779 }
780 }
781
782 /*
783 * See update_scanline_offset() for the details on the
784 * scanline_offset adjustment.
785 */
786 return (position + crtc->scanline_offset) % vtotal;
787 }
788
789 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
790 unsigned int flags, int *vpos, int *hpos,
791 ktime_t *stime, ktime_t *etime,
792 const struct drm_display_mode *mode)
793 {
794 struct drm_i915_private *dev_priv = dev->dev_private;
795 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
796 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
797 int position;
798 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
799 bool in_vbl = true;
800 int ret = 0;
801 unsigned long irqflags;
802
803 if (WARN_ON(!mode->crtc_clock)) {
804 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
805 "pipe %c\n", pipe_name(pipe));
806 return 0;
807 }
808
809 htotal = mode->crtc_htotal;
810 hsync_start = mode->crtc_hsync_start;
811 vtotal = mode->crtc_vtotal;
812 vbl_start = mode->crtc_vblank_start;
813 vbl_end = mode->crtc_vblank_end;
814
815 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
816 vbl_start = DIV_ROUND_UP(vbl_start, 2);
817 vbl_end /= 2;
818 vtotal /= 2;
819 }
820
821 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
822
823 /*
824 * Lock uncore.lock, as we will do multiple timing critical raw
825 * register reads, potentially with preemption disabled, so the
826 * following code must not block on uncore.lock.
827 */
828 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
829
830 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
831
832 /* Get optional system timestamp before query. */
833 if (stime)
834 *stime = ktime_get();
835
836 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
837 /* No obvious pixelcount register. Only query vertical
838 * scanout position from Display scan line register.
839 */
840 position = __intel_get_crtc_scanline(intel_crtc);
841 } else {
842 /* Have access to pixelcount since start of frame.
843 * We can split this into vertical and horizontal
844 * scanout position.
845 */
846 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
847
848 /* convert to pixel counts */
849 vbl_start *= htotal;
850 vbl_end *= htotal;
851 vtotal *= htotal;
852
853 /*
854 * In interlaced modes, the pixel counter counts all pixels,
855 * so one field will have htotal more pixels. In order to avoid
856 * the reported position from jumping backwards when the pixel
857 * counter is beyond the length of the shorter field, just
858 * clamp the position to the length of the shorter field. This
859 * matches how the scanline counter based position works since
860 * the scanline counter doesn't count the two half lines.
861 */
862 if (position >= vtotal)
863 position = vtotal - 1;
864
865 /*
866 * Start of vblank interrupt is triggered at start of hsync,
867 * just prior to the first active line of vblank. However we
868 * consider lines to start at the leading edge of horizontal
869 * active. So, should we get here before we've crossed into
870 * the horizontal active of the first line in vblank, we would
871 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
872 * always add htotal-hsync_start to the current pixel position.
873 */
874 position = (position + htotal - hsync_start) % vtotal;
875 }
876
877 /* Get optional system timestamp after query. */
878 if (etime)
879 *etime = ktime_get();
880
881 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
882
883 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
884
885 in_vbl = position >= vbl_start && position < vbl_end;
886
887 /*
888 * While in vblank, position will be negative
889 * counting up towards 0 at vbl_end. And outside
890 * vblank, position will be positive, counting
891 * up from vbl_end.
892 */
893 if (position >= vbl_start)
894 position -= vbl_end;
895 else
896 position += vtotal - vbl_end;
897
898 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
899 *vpos = position;
900 *hpos = 0;
901 } else {
902 *vpos = position / htotal;
903 *hpos = position - (*vpos * htotal);
904 }
905
906 /* In vblank? */
907 if (in_vbl)
908 ret |= DRM_SCANOUTPOS_IN_VBLANK;
909
910 return ret;
911 }
912
913 int intel_get_crtc_scanline(struct intel_crtc *crtc)
914 {
915 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
916 unsigned long irqflags;
917 int position;
918
919 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
920 position = __intel_get_crtc_scanline(crtc);
921 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
922
923 return position;
924 }
925
926 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
927 int *max_error,
928 struct timeval *vblank_time,
929 unsigned flags)
930 {
931 struct drm_crtc *crtc;
932
933 if (pipe >= INTEL_INFO(dev)->num_pipes) {
934 DRM_ERROR("Invalid crtc %u\n", pipe);
935 return -EINVAL;
936 }
937
938 /* Get drm_crtc to timestamp: */
939 crtc = intel_get_crtc_for_pipe(dev, pipe);
940 if (crtc == NULL) {
941 DRM_ERROR("Invalid crtc %u\n", pipe);
942 return -EINVAL;
943 }
944
945 if (!crtc->hwmode.crtc_clock) {
946 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
947 return -EBUSY;
948 }
949
950 /* Helper routine in DRM core does all the work: */
951 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
952 vblank_time, flags,
953 &crtc->hwmode);
954 }
955
956 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
957 {
958 struct drm_i915_private *dev_priv = dev->dev_private;
959 u32 busy_up, busy_down, max_avg, min_avg;
960 u8 new_delay;
961
962 spin_lock(&mchdev_lock);
963
964 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
965
966 new_delay = dev_priv->ips.cur_delay;
967
968 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
969 busy_up = I915_READ(RCPREVBSYTUPAVG);
970 busy_down = I915_READ(RCPREVBSYTDNAVG);
971 max_avg = I915_READ(RCBMAXAVG);
972 min_avg = I915_READ(RCBMINAVG);
973
974 /* Handle RCS change request from hw */
975 if (busy_up > max_avg) {
976 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
977 new_delay = dev_priv->ips.cur_delay - 1;
978 if (new_delay < dev_priv->ips.max_delay)
979 new_delay = dev_priv->ips.max_delay;
980 } else if (busy_down < min_avg) {
981 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
982 new_delay = dev_priv->ips.cur_delay + 1;
983 if (new_delay > dev_priv->ips.min_delay)
984 new_delay = dev_priv->ips.min_delay;
985 }
986
987 if (ironlake_set_drps(dev, new_delay))
988 dev_priv->ips.cur_delay = new_delay;
989
990 spin_unlock(&mchdev_lock);
991
992 return;
993 }
994
995 static void notify_ring(struct intel_engine_cs *ring)
996 {
997 #ifdef __NetBSD__
998 struct drm_i915_private *dev_priv = ring->dev->dev_private;
999 unsigned long flags;
1000 #endif
1001
1002 if (!intel_ring_initialized(ring))
1003 return;
1004
1005 trace_i915_gem_request_notify(ring);
1006
1007 #ifdef __NetBSD__
1008 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1009 /* XXX Set a flag under the lock or push the lock out to callers. */
1010 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
1011 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1012 #else
1013 wake_up_all(&ring->irq_queue);
1014 #endif
1015 }
1016
1017 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1018 struct intel_rps_ei *ei)
1019 {
1020 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1021 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1022 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1023 }
1024
1025 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1026 {
1027 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
1028 }
1029
1030 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1031 {
1032 const struct intel_rps_ei *prev = &dev_priv->rps.ei;
1033 struct intel_rps_ei now;
1034 u32 events = 0;
1035
1036 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1037 return 0;
1038
1039 vlv_c0_read(dev_priv, &now);
1040 if (now.cz_clock == 0)
1041 return 0;
1042
1043 if (prev->cz_clock) {
1044 u64 time, c0;
1045 unsigned int mul;
1046
1047 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
1048 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1049 mul <<= 8;
1050
1051 time = now.cz_clock - prev->cz_clock;
1052 time *= dev_priv->czclk_freq;
1053
1054 /* Workload can be split between render + media,
1055 * e.g. SwapBuffers being blitted in X after being rendered in
1056 * mesa. To account for this we need to combine both engines
1057 * into our activity counter.
1058 */
1059 c0 = now.render_c0 - prev->render_c0;
1060 c0 += now.media_c0 - prev->media_c0;
1061 c0 *= mul;
1062
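/*
 * c0 and time are now scaled such that the comparisons below are
 * effectively busyness% against the up/down threshold%.
 */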
1063 if (c0 > time * dev_priv->rps.up_threshold)
1064 events = GEN6_PM_RP_UP_THRESHOLD;
1065 else if (c0 < time * dev_priv->rps.down_threshold)
1066 events = GEN6_PM_RP_DOWN_THRESHOLD;
1067 }
1068
1069 dev_priv->rps.ei = now;
1070 return events;
1071 }
1072
1073 static bool any_waiters(struct drm_i915_private *dev_priv)
1074 {
1075 struct intel_engine_cs *ring;
1076 int i;
1077
1078 for_each_ring(ring, dev_priv, i)
1079 if (ring->irq_refcount)
1080 return true;
1081
1082 return false;
1083 }
1084
1085 static void gen6_pm_rps_work(struct work_struct *work)
1086 {
1087 struct drm_i915_private *dev_priv =
1088 container_of(work, struct drm_i915_private, rps.work);
1089 bool client_boost;
1090 int new_delay, adj, min, max;
1091 u32 pm_iir;
1092
1093 spin_lock_irq(&dev_priv->irq_lock);
1094 /* Speed up work cancellation while disabling RPS interrupts. */
1095 if (!dev_priv->rps.interrupts_enabled) {
1096 spin_unlock_irq(&dev_priv->irq_lock);
1097 return;
1098 }
1099 pm_iir = dev_priv->rps.pm_iir;
1100 dev_priv->rps.pm_iir = 0;
1101 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1102 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1103 client_boost = dev_priv->rps.client_boost;
1104 dev_priv->rps.client_boost = false;
1105 spin_unlock_irq(&dev_priv->irq_lock);
1106
1107 /* Make sure we didn't queue anything we're not going to process. */
1108 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1109
1110 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1111 return;
1112
1113 mutex_lock(&dev_priv->rps.hw_lock);
1114
1115 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1116
1117 adj = dev_priv->rps.last_adj;
1118 new_delay = dev_priv->rps.cur_freq;
1119 min = dev_priv->rps.min_freq_softlimit;
1120 max = dev_priv->rps.max_freq_softlimit;
1121
1122 if (client_boost) {
1123 new_delay = dev_priv->rps.max_freq_softlimit;
1124 adj = 0;
1125 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1126 if (adj > 0)
1127 adj *= 2;
1128 else /* CHV needs even encode values */
1129 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1130 /*
1131 * For better performance, jump directly
1132 * to RPe if we're below it.
1133 */
1134 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1135 new_delay = dev_priv->rps.efficient_freq;
1136 adj = 0;
1137 }
1138 } else if (any_waiters(dev_priv)) {
1139 adj = 0;
1140 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1141 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1142 new_delay = dev_priv->rps.efficient_freq;
1143 else
1144 new_delay = dev_priv->rps.min_freq_softlimit;
1145 adj = 0;
1146 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1147 if (adj < 0)
1148 adj *= 2;
1149 else /* CHV needs even encode values */
1150 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1151 } else { /* unknown event */
1152 adj = 0;
1153 }
1154
1155 dev_priv->rps.last_adj = adj;
1156
1157 /* sysfs frequency interfaces may have snuck in while servicing the
1158 * interrupt
1159 */
1160 new_delay += adj;
1161 new_delay = clamp_t(int, new_delay, min, max);
1162
1163 intel_set_rps(dev_priv->dev, new_delay);
1164
1165 mutex_unlock(&dev_priv->rps.hw_lock);
1166 }
1167
1168
1169 /**
1170 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1171 * occurred.
1172 * @work: workqueue struct
1173 *
1174 * Doesn't actually do anything except notify userspace. As a consequence of
1175 * this event, userspace should try to remap the bad rows since,
1176 * statistically, the same row is more likely to go bad again.
1177 */
1178 static void ivybridge_parity_work(struct work_struct *work)
1179 {
1180 struct drm_i915_private *dev_priv =
1181 container_of(work, struct drm_i915_private, l3_parity.error_work);
1182 u32 error_status, row, bank, subbank;
1183 #ifndef __NetBSD__ /* XXX kobject uevent...? */
1184 char *parity_event[6];
1185 #endif
1186 uint32_t misccpctl;
1187 uint8_t slice = 0;
1188
1189 /* We must turn off DOP level clock gating to access the L3 registers.
1190 * In order to prevent a get/put style interface, acquire struct mutex
1191 * any time we access those registers.
1192 */
1193 mutex_lock(&dev_priv->dev->struct_mutex);
1194
1195 /* If we've screwed up tracking, just let the interrupt fire again */
1196 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1197 goto out;
1198
1199 misccpctl = I915_READ(GEN7_MISCCPCTL);
1200 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1201 POSTING_READ(GEN7_MISCCPCTL);
1202
1203 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1204 u32 reg;
1205
1206 slice--;
1207 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1208 break;
1209
1210 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1211
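/* The L3 error status registers for successive slices are 0x200 bytes apart. */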
1212 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1213
1214 error_status = I915_READ(reg);
1215 row = GEN7_PARITY_ERROR_ROW(error_status);
1216 bank = GEN7_PARITY_ERROR_BANK(error_status);
1217 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1218
1219 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1220 POSTING_READ(reg);
1221
1222 #ifndef __NetBSD__ /* XXX kobject uevent...? */
1223 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1224 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1225 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1226 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1227 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1228 parity_event[5] = NULL;
1229
1230 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1231 KOBJ_CHANGE, parity_event);
1232 #endif
1233
1234 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1235 slice, row, bank, subbank);
1236
1237 #ifndef __NetBSD__ /* XXX kobject uevent...? */
1238 kfree(parity_event[4]);
1239 kfree(parity_event[3]);
1240 kfree(parity_event[2]);
1241 kfree(parity_event[1]);
1242 #endif
1243 }
1244
1245 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1246
1247 out:
1248 WARN_ON(dev_priv->l3_parity.which_slice);
1249 spin_lock_irq(&dev_priv->irq_lock);
1250 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1251 spin_unlock_irq(&dev_priv->irq_lock);
1252
1253 mutex_unlock(&dev_priv->dev->struct_mutex);
1254 }
1255
1256 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1257 {
1258 struct drm_i915_private *dev_priv = dev->dev_private;
1259
1260 if (!HAS_L3_DPF(dev))
1261 return;
1262
1263 spin_lock(&dev_priv->irq_lock);
1264 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1265 spin_unlock(&dev_priv->irq_lock);
1266
1267 iir &= GT_PARITY_ERROR(dev);
1268 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1269 dev_priv->l3_parity.which_slice |= 1 << 1;
1270
1271 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1272 dev_priv->l3_parity.which_slice |= 1 << 0;
1273
1274 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1275 }
1276
1277 static void ilk_gt_irq_handler(struct drm_device *dev,
1278 struct drm_i915_private *dev_priv,
1279 u32 gt_iir)
1280 {
1281 if (gt_iir &
1282 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1283 notify_ring(&dev_priv->ring[RCS]);
1284 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1285 notify_ring(&dev_priv->ring[VCS]);
1286 }
1287
1288 static void snb_gt_irq_handler(struct drm_device *dev,
1289 struct drm_i915_private *dev_priv,
1290 u32 gt_iir)
1291 {
1292
1293 if (gt_iir &
1294 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1295 notify_ring(&dev_priv->ring[RCS]);
1296 if (gt_iir & GT_BSD_USER_INTERRUPT)
1297 notify_ring(&dev_priv->ring[VCS]);
1298 if (gt_iir & GT_BLT_USER_INTERRUPT)
1299 notify_ring(&dev_priv->ring[BCS]);
1300
1301 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1302 GT_BSD_CS_ERROR_INTERRUPT |
1303 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1304 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1305
1306 if (gt_iir & GT_PARITY_ERROR(dev))
1307 ivybridge_parity_error_irq_handler(dev, gt_iir);
1308 }
1309
1310 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1311 u32 master_ctl)
1312 {
1313 irqreturn_t ret = IRQ_NONE;
1314
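/*
 * The raw _FW accessors skip the forcewake/uncore bookkeeping, keeping
 * this hard interrupt path cheap.
 */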
1315 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1316 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1317 if (tmp) {
1318 I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1319 ret = IRQ_HANDLED;
1320
1321 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1322 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1323 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1324 notify_ring(&dev_priv->ring[RCS]);
1325
1326 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1327 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1328 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1329 notify_ring(&dev_priv->ring[BCS]);
1330 } else
1331 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1332 }
1333
1334 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1335 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1336 if (tmp) {
1337 I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1338 ret = IRQ_HANDLED;
1339
1340 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1341 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1342 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1343 notify_ring(&dev_priv->ring[VCS]);
1344
1345 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1346 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1347 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1348 notify_ring(&dev_priv->ring[VCS2]);
1349 } else
1350 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1351 }
1352
1353 if (master_ctl & GEN8_GT_VECS_IRQ) {
1354 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1355 if (tmp) {
1356 I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1357 ret = IRQ_HANDLED;
1358
1359 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1360 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1361 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1362 notify_ring(&dev_priv->ring[VECS]);
1363 } else
1364 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1365 }
1366
1367 if (master_ctl & GEN8_GT_PM_IRQ) {
1368 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1369 if (tmp & dev_priv->pm_rps_events) {
1370 I915_WRITE_FW(GEN8_GT_IIR(2),
1371 tmp & dev_priv->pm_rps_events);
1372 ret = IRQ_HANDLED;
1373 gen6_rps_irq_handler(dev_priv, tmp);
1374 } else
1375 DRM_ERROR("The master control interrupt lied (PM)!\n");
1376 }
1377
1378 return ret;
1379 }
1380
1381 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1382 {
1383 switch (port) {
1384 case PORT_A:
1385 return val & PORTA_HOTPLUG_LONG_DETECT;
1386 case PORT_B:
1387 return val & PORTB_HOTPLUG_LONG_DETECT;
1388 case PORT_C:
1389 return val & PORTC_HOTPLUG_LONG_DETECT;
1390 default:
1391 return false;
1392 }
1393 }
1394
1395 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1396 {
1397 switch (port) {
1398 case PORT_E:
1399 return val & PORTE_HOTPLUG_LONG_DETECT;
1400 default:
1401 return false;
1402 }
1403 }
1404
1405 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1406 {
1407 switch (port) {
1408 case PORT_A:
1409 return val & PORTA_HOTPLUG_LONG_DETECT;
1410 case PORT_B:
1411 return val & PORTB_HOTPLUG_LONG_DETECT;
1412 case PORT_C:
1413 return val & PORTC_HOTPLUG_LONG_DETECT;
1414 case PORT_D:
1415 return val & PORTD_HOTPLUG_LONG_DETECT;
1416 default:
1417 return false;
1418 }
1419 }
1420
1421 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1422 {
1423 switch (port) {
1424 case PORT_A:
1425 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1426 default:
1427 return false;
1428 }
1429 }
1430
1431 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1432 {
1433 switch (port) {
1434 case PORT_B:
1435 return val & PORTB_HOTPLUG_LONG_DETECT;
1436 case PORT_C:
1437 return val & PORTC_HOTPLUG_LONG_DETECT;
1438 case PORT_D:
1439 return val & PORTD_HOTPLUG_LONG_DETECT;
1440 default:
1441 return false;
1442 }
1443 }
1444
1445 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1446 {
1447 switch (port) {
1448 case PORT_B:
1449 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1450 case PORT_C:
1451 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1452 case PORT_D:
1453 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1454 default:
1455 return false;
1456 }
1457 }
1458
1459 /*
1460 * Get a bit mask of pins that have triggered, and which ones may be long.
1461 * This can be called multiple times with the same masks to accumulate
1462 * hotplug detection results from several registers.
1463 *
1464 * Note that the caller is expected to zero out the masks initially.
1465 */
1466 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1467 u32 hotplug_trigger, u32 dig_hotplug_reg,
1468 const u32 hpd[HPD_NUM_PINS],
1469 bool long_pulse_detect(enum port port, u32 val))
1470 {
1471 enum port port;
1472 int i;
1473
1474 for_each_hpd_pin(i) {
1475 if ((hpd[i] & hotplug_trigger) == 0)
1476 continue;
1477
1478 *pin_mask |= BIT(i);
1479
1480 if (!intel_hpd_pin_to_port(i, &port))
1481 continue;
1482
1483 if (long_pulse_detect(port, dig_hotplug_reg))
1484 *long_mask |= BIT(i);
1485 }
1486
1487 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1488 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1489
1490 }
1491
1492 static void gmbus_irq_handler(struct drm_device *dev)
1493 {
1494 struct drm_i915_private *dev_priv = dev->dev_private;
1495
1496 #ifdef __NetBSD__
1497 spin_lock(&dev_priv->gmbus_wait_lock);
1498 /* XXX Set a flag here... */
1499 DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
1500 &dev_priv->gmbus_wait_lock);
1501 spin_unlock(&dev_priv->gmbus_wait_lock);
1502 #else
1503 wake_up_all(&dev_priv->gmbus_wait_queue);
1504 #endif
1505 }
1506
1507 static void dp_aux_irq_handler(struct drm_device *dev)
1508 {
1509 struct drm_i915_private *dev_priv = dev->dev_private;
1510
1511 #ifdef __NetBSD__
1512 spin_lock(&dev_priv->gmbus_wait_lock);
1513 /* XXX Set a flag here... */
1514 DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
1515 &dev_priv->gmbus_wait_lock);
1516 spin_unlock(&dev_priv->gmbus_wait_lock);
1517 #else
1518 wake_up_all(&dev_priv->gmbus_wait_queue);
1519 #endif
1520 }
1521
1522 #if defined(CONFIG_DEBUG_FS)
1523 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1524 uint32_t crc0, uint32_t crc1,
1525 uint32_t crc2, uint32_t crc3,
1526 uint32_t crc4)
1527 {
1528 struct drm_i915_private *dev_priv = dev->dev_private;
1529 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1530 struct intel_pipe_crc_entry *entry;
1531 int head, tail;
1532
1533 spin_lock(&pipe_crc->lock);
1534
1535 if (!pipe_crc->entries) {
1536 spin_unlock(&pipe_crc->lock);
1537 DRM_DEBUG_KMS("spurious interrupt\n");
1538 return;
1539 }
1540
1541 head = pipe_crc->head;
1542 tail = pipe_crc->tail;
1543
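/* Drop the sample if the CRC ring buffer has no free slot left. */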
1544 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1545 spin_unlock(&pipe_crc->lock);
1546 DRM_ERROR("CRC buffer overflowing\n");
1547 return;
1548 }
1549
1550 entry = &pipe_crc->entries[head];
1551
1552 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1553 entry->crc[0] = crc0;
1554 entry->crc[1] = crc1;
1555 entry->crc[2] = crc2;
1556 entry->crc[3] = crc3;
1557 entry->crc[4] = crc4;
1558
1559 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1560 pipe_crc->head = head;
1561
1562 spin_unlock(&pipe_crc->lock);
1563
1564 wake_up_interruptible(&pipe_crc->wq);
1565 }
1566 #else
1567 static inline void
1568 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1569 uint32_t crc0, uint32_t crc1,
1570 uint32_t crc2, uint32_t crc3,
1571 uint32_t crc4) {}
1572 #endif
1573
1574
1575 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1576 {
1577 struct drm_i915_private *dev_priv = dev->dev_private;
1578
1579 display_pipe_crc_irq_handler(dev, pipe,
1580 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1581 0, 0, 0, 0);
1582 }
1583
1584 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1585 {
1586 struct drm_i915_private *dev_priv = dev->dev_private;
1587
1588 display_pipe_crc_irq_handler(dev, pipe,
1589 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1590 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1591 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1592 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1593 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1594 }
1595
1596 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1597 {
1598 struct drm_i915_private *dev_priv = dev->dev_private;
1599 uint32_t res1, res2;
1600
1601 if (INTEL_INFO(dev)->gen >= 3)
1602 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1603 else
1604 res1 = 0;
1605
1606 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1607 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1608 else
1609 res2 = 0;
1610
1611 display_pipe_crc_irq_handler(dev, pipe,
1612 I915_READ(PIPE_CRC_RES_RED(pipe)),
1613 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1614 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1615 res1, res2);
1616 }
1617
1618 /* The RPS events need forcewake, so we add them to a work queue and mask their
1619 * IMR bits until the work is done. Other interrupts can be processed without
1620 * the work queue. */
1621 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1622 {
1623 if (pm_iir & dev_priv->pm_rps_events) {
1624 spin_lock(&dev_priv->irq_lock);
1625 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1626 if (dev_priv->rps.interrupts_enabled) {
1627 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1628 queue_work(dev_priv->wq, &dev_priv->rps.work);
1629 }
1630 spin_unlock(&dev_priv->irq_lock);
1631 }
1632
1633 if (INTEL_INFO(dev_priv)->gen >= 8)
1634 return;
1635
1636 if (HAS_VEBOX(dev_priv->dev)) {
1637 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1638 notify_ring(&dev_priv->ring[VECS]);
1639
1640 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1641 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1642 }
1643 }
1644
1645 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1646 {
1647 if (!drm_handle_vblank(dev, pipe))
1648 return false;
1649
1650 return true;
1651 }
1652
1653 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1654 {
1655 struct drm_i915_private *dev_priv = dev->dev_private;
1656 u32 pipe_stats[I915_MAX_PIPES] = { };
1657 int pipe;
1658
1659 spin_lock(&dev_priv->irq_lock);
1660 for_each_pipe(dev_priv, pipe) {
1661 int reg;
1662 u32 mask, iir_bit = 0;
1663
1664 /*
1665 * PIPESTAT bits get signalled even when the interrupt is
1666 * disabled with the mask bits, and some of the status bits do
1667 * not generate interrupts at all (like the underrun bit). Hence
1668 * we need to be careful that we only handle what we want to
1669 * handle.
1670 */
1671
1672 /* fifo underruns are filtered in the underrun handler. */
1673 mask = PIPE_FIFO_UNDERRUN_STATUS;
1674
1675 switch (pipe) {
1676 case PIPE_A:
1677 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1678 break;
1679 case PIPE_B:
1680 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1681 break;
1682 case PIPE_C:
1683 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1684 break;
1685 }
1686 if (iir & iir_bit)
1687 mask |= dev_priv->pipestat_irq_mask[pipe];
1688
1689 if (!mask)
1690 continue;
1691
1692 reg = PIPESTAT(pipe);
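/*
 * Keep the enable bits in the value written back below so that only
 * the latched status bits get cleared.
 */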
1693 mask |= PIPESTAT_INT_ENABLE_MASK;
1694 pipe_stats[pipe] = I915_READ(reg) & mask;
1695
1696 /*
1697 * Clear the PIPE*STAT regs before the IIR
1698 */
1699 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1700 PIPESTAT_INT_STATUS_MASK))
1701 I915_WRITE(reg, pipe_stats[pipe]);
1702 }
1703 spin_unlock(&dev_priv->irq_lock);
1704
1705 for_each_pipe(dev_priv, pipe) {
1706 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1707 intel_pipe_handle_vblank(dev, pipe))
1708 intel_check_page_flip(dev, pipe);
1709
1710 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1711 intel_prepare_page_flip(dev, pipe);
1712 intel_finish_page_flip(dev, pipe);
1713 }
1714
1715 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1716 i9xx_pipe_crc_irq_handler(dev, pipe);
1717
1718 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1719 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1720 }
1721
1722 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1723 gmbus_irq_handler(dev);
1724 }
1725
1726 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1727 {
1728 struct drm_i915_private *dev_priv = dev->dev_private;
1729 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1730 u32 pin_mask = 0, long_mask = 0;
1731
1732 if (!hotplug_status)
1733 return;
1734
1735 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1736 /*
1737 * Make sure hotplug status is cleared before we clear IIR, or else we
1738 * may miss hotplug events.
1739 */
1740 POSTING_READ(PORT_HOTPLUG_STAT);
1741
1742 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1743 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1744
1745 if (hotplug_trigger) {
1746 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1747 hotplug_trigger, hpd_status_g4x,
1748 i9xx_port_hotplug_long_detect);
1749
1750 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1751 }
1752
1753 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1754 dp_aux_irq_handler(dev);
1755 } else {
1756 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1757
1758 if (hotplug_trigger) {
1759 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1760 hotplug_trigger, hpd_status_i915,
1761 i9xx_port_hotplug_long_detect);
1762 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1763 }
1764 }
1765 }
1766
1767 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
1768 {
1769 struct drm_device *dev = arg;
1770 struct drm_i915_private *dev_priv = dev->dev_private;
1771 u32 iir, gt_iir, pm_iir;
1772 irqreturn_t ret = IRQ_NONE;
1773
1774 if (!intel_irqs_enabled(dev_priv))
1775 return IRQ_NONE;
1776
1777 while (true) {
1778 /* Find, clear, then process each source of interrupt */
1779
1780 gt_iir = I915_READ(GTIIR);
1781 if (gt_iir)
1782 I915_WRITE(GTIIR, gt_iir);
1783
1784 pm_iir = I915_READ(GEN6_PMIIR);
1785 if (pm_iir)
1786 I915_WRITE(GEN6_PMIIR, pm_iir);
1787
1788 iir = I915_READ(VLV_IIR);
1789 if (iir) {
1790 /* Consume port before clearing IIR or we'll miss events */
1791 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1792 i9xx_hpd_irq_handler(dev);
1793 I915_WRITE(VLV_IIR, iir);
1794 }
1795
1796 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1797 goto out;
1798
1799 ret = IRQ_HANDLED;
1800
1801 if (gt_iir)
1802 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1803 if (pm_iir)
1804 gen6_rps_irq_handler(dev_priv, pm_iir);
1805 /* Call regardless, as some status bits might not be
1806 * signalled in iir */
1807 valleyview_pipestat_irq_handler(dev, iir);
1808 }
1809
1810 out:
1811 return ret;
1812 }
1813
1814 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1815 {
1816 struct drm_device *dev = arg;
1817 struct drm_i915_private *dev_priv = dev->dev_private;
1818 u32 master_ctl, iir;
1819 irqreturn_t ret = IRQ_NONE;
1820
1821 if (!intel_irqs_enabled(dev_priv))
1822 return IRQ_NONE;
1823
1824 for (;;) {
1825 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1826 iir = I915_READ(VLV_IIR);
1827
1828 if (master_ctl == 0 && iir == 0)
1829 break;
1830
1831 ret = IRQ_HANDLED;
1832
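/*
 * Disable the master interrupt while this iteration is processed;
 * it is re-enabled below once all sources have been handled.
 */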
1833 I915_WRITE(GEN8_MASTER_IRQ, 0);
1834
1835 /* Find, clear, then process each source of interrupt */
1836
1837 if (iir) {
1838 /* Consume port before clearing IIR or we'll miss events */
1839 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1840 i9xx_hpd_irq_handler(dev);
1841 I915_WRITE(VLV_IIR, iir);
1842 }
1843
1844 gen8_gt_irq_handler(dev_priv, master_ctl);
1845
1846 /* Call regardless, as some status bits might not be
1847 * signalled in iir */
1848 valleyview_pipestat_irq_handler(dev, iir);
1849
1850 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1851 POSTING_READ(GEN8_MASTER_IRQ);
1852 }
1853
1854 return ret;
1855 }
1856
1857 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1858 const u32 hpd[HPD_NUM_PINS])
1859 {
1860 struct drm_i915_private *dev_priv = to_i915(dev);
1861 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1862
1863 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1864 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1865
1866 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1867 dig_hotplug_reg, hpd,
1868 pch_port_hotplug_long_detect);
1869
1870 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1871 }
1872
1873 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1874 {
1875 struct drm_i915_private *dev_priv = dev->dev_private;
1876 int pipe;
1877 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1878
1879 if (hotplug_trigger)
1880 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1881
1882 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1883 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1884 SDE_AUDIO_POWER_SHIFT);
1885 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1886 port_name(port));
1887 }
1888
1889 if (pch_iir & SDE_AUX_MASK)
1890 dp_aux_irq_handler(dev);
1891
1892 if (pch_iir & SDE_GMBUS)
1893 gmbus_irq_handler(dev);
1894
1895 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1896 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1897
1898 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1899 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1900
1901 if (pch_iir & SDE_POISON)
1902 DRM_ERROR("PCH poison interrupt\n");
1903
1904 if (pch_iir & SDE_FDI_MASK)
1905 for_each_pipe(dev_priv, pipe)
1906 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1907 pipe_name(pipe),
1908 I915_READ(FDI_RX_IIR(pipe)));
1909
1910 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1911 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1912
1913 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1914 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1915
1916 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1917 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1918
1919 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1920 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1921 }
1922
1923 static void ivb_err_int_handler(struct drm_device *dev)
1924 {
1925 struct drm_i915_private *dev_priv = dev->dev_private;
1926 u32 err_int = I915_READ(GEN7_ERR_INT);
1927 enum i915_pipe pipe;
1928
1929 if (err_int & ERR_INT_POISON)
1930 DRM_ERROR("Poison interrupt\n");
1931
1932 for_each_pipe(dev_priv, pipe) {
1933 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1934 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1935
1936 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1937 if (IS_IVYBRIDGE(dev))
1938 ivb_pipe_crc_irq_handler(dev, pipe);
1939 else
1940 hsw_pipe_crc_irq_handler(dev, pipe);
1941 }
1942 }
1943
1944 I915_WRITE(GEN7_ERR_INT, err_int);
1945 }
1946
1947 static void cpt_serr_int_handler(struct drm_device *dev)
1948 {
1949 struct drm_i915_private *dev_priv = dev->dev_private;
1950 u32 serr_int = I915_READ(SERR_INT);
1951
1952 if (serr_int & SERR_INT_POISON)
1953 DRM_ERROR("PCH poison interrupt\n");
1954
1955 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1956 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1957
1958 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1959 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1960
1961 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1962 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1963
1964 I915_WRITE(SERR_INT, serr_int);
1965 }
1966
1967 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1968 {
1969 struct drm_i915_private *dev_priv = dev->dev_private;
1970 int pipe;
1971 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1972
1973 if (hotplug_trigger)
1974 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1975
1976 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1977 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1978 SDE_AUDIO_POWER_SHIFT_CPT);
1979 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1980 port_name(port));
1981 }
1982
1983 if (pch_iir & SDE_AUX_MASK_CPT)
1984 dp_aux_irq_handler(dev);
1985
1986 if (pch_iir & SDE_GMBUS_CPT)
1987 gmbus_irq_handler(dev);
1988
1989 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1990 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1991
1992 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1993 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1994
1995 if (pch_iir & SDE_FDI_MASK_CPT)
1996 for_each_pipe(dev_priv, pipe)
1997 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1998 pipe_name(pipe),
1999 I915_READ(FDI_RX_IIR(pipe)));
2000
2001 if (pch_iir & SDE_ERROR_CPT)
2002 cpt_serr_int_handler(dev);
2003 }
2004
2005 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2006 {
2007 struct drm_i915_private *dev_priv = dev->dev_private;
2008 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2009 ~SDE_PORTE_HOTPLUG_SPT;
2010 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2011 u32 pin_mask = 0, long_mask = 0;
2012
2013 if (hotplug_trigger) {
2014 u32 dig_hotplug_reg;
2015
2016 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2017 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2018
2019 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2020 dig_hotplug_reg, hpd_spt,
2021 spt_port_hotplug_long_detect);
2022 }
2023
2024 if (hotplug2_trigger) {
2025 u32 dig_hotplug_reg;
2026
2027 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2028 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2029
2030 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2031 dig_hotplug_reg, hpd_spt,
2032 spt_port_hotplug2_long_detect);
2033 }
2034
2035 if (pin_mask)
2036 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2037
2038 if (pch_iir & SDE_GMBUS_CPT)
2039 gmbus_irq_handler(dev);
2040 }
2041
2042 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2043 const u32 hpd[HPD_NUM_PINS])
2044 {
2045 struct drm_i915_private *dev_priv = to_i915(dev);
2046 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2047
2048 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2049 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2050
2051 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2052 dig_hotplug_reg, hpd,
2053 ilk_port_hotplug_long_detect);
2054
2055 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2056 }
2057
2058 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2059 {
2060 struct drm_i915_private *dev_priv = dev->dev_private;
2061 enum i915_pipe pipe;
2062 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2063
2064 if (hotplug_trigger)
2065 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2066
2067 if (de_iir & DE_AUX_CHANNEL_A)
2068 dp_aux_irq_handler(dev);
2069
2070 if (de_iir & DE_GSE)
2071 intel_opregion_asle_intr(dev);
2072
2073 if (de_iir & DE_POISON)
2074 DRM_ERROR("Poison interrupt\n");
2075
2076 for_each_pipe(dev_priv, pipe) {
2077 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2078 intel_pipe_handle_vblank(dev, pipe))
2079 intel_check_page_flip(dev, pipe);
2080
2081 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2082 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2083
2084 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2085 i9xx_pipe_crc_irq_handler(dev, pipe);
2086
2087 /* plane/pipes map 1:1 on ilk+ */
2088 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2089 intel_prepare_page_flip(dev, pipe);
2090 intel_finish_page_flip_plane(dev, pipe);
2091 }
2092 }
2093
2094 /* check event from PCH */
2095 if (de_iir & DE_PCH_EVENT) {
2096 u32 pch_iir = I915_READ(SDEIIR);
2097
2098 if (HAS_PCH_CPT(dev))
2099 cpt_irq_handler(dev, pch_iir);
2100 else
2101 ibx_irq_handler(dev, pch_iir);
2102
2103 /* should clear PCH hotplug event bits before clearing the CPU irq */
2104 I915_WRITE(SDEIIR, pch_iir);
2105 }
2106
2107 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2108 ironlake_rps_change_irq_handler(dev);
2109 }
2110
2111 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2112 {
2113 struct drm_i915_private *dev_priv = dev->dev_private;
2114 enum i915_pipe pipe;
2115 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2116
2117 if (hotplug_trigger)
2118 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2119
2120 if (de_iir & DE_ERR_INT_IVB)
2121 ivb_err_int_handler(dev);
2122
2123 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2124 dp_aux_irq_handler(dev);
2125
2126 if (de_iir & DE_GSE_IVB)
2127 intel_opregion_asle_intr(dev);
2128
2129 for_each_pipe(dev_priv, pipe) {
2130 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2131 intel_pipe_handle_vblank(dev, pipe))
2132 intel_check_page_flip(dev, pipe);
2133
2134 /* plane/pipes map 1:1 on ilk+ */
2135 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2136 intel_prepare_page_flip(dev, pipe);
2137 intel_finish_page_flip_plane(dev, pipe);
2138 }
2139 }
2140
2141 /* check event from PCH */
2142 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2143 u32 pch_iir = I915_READ(SDEIIR);
2144
2145 cpt_irq_handler(dev, pch_iir);
2146
2147 /* clear PCH hotplug event bits before clearing the CPU irq */
2148 I915_WRITE(SDEIIR, pch_iir);
2149 }
2150 }
2151
2152 /*
2153 * To handle irqs with the minimum potential races with fresh interrupts, we:
2154 * 1 - Disable Master Interrupt Control.
2155 * 2 - Find the source(s) of the interrupt.
2156 * 3 - Clear the Interrupt Identity bits (IIR).
2157 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2158 * 5 - Re-enable Master Interrupt Control.
2159 */
2160 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
2161 {
2162 struct drm_device *dev = arg;
2163 struct drm_i915_private *dev_priv = dev->dev_private;
2164 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2165 irqreturn_t ret = IRQ_NONE;
2166
2167 if (!intel_irqs_enabled(dev_priv))
2168 return IRQ_NONE;
2169
2170 /* We get interrupts on unclaimed registers, so check for this before we
2171 * do any I915_{READ,WRITE}. */
2172 intel_uncore_check_errors(dev);
2173
2174 /* disable master interrupt before clearing iir */
2175 de_ier = I915_READ(DEIER);
2176 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2177 POSTING_READ(DEIER);
2178
2179 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2180 * interrupts will be stored on its back queue, and then we'll be
2181 * able to process them after we restore SDEIER (as soon as we restore
2182 * it, we'll get an interrupt if SDEIIR still has something to process
2183 * due to its back queue). */
2184 if (!HAS_PCH_NOP(dev)) {
2185 sde_ier = I915_READ(SDEIER);
2186 I915_WRITE(SDEIER, 0);
2187 POSTING_READ(SDEIER);
2188 }
2189
2190 /* Find, clear, then process each source of interrupt */
2191
2192 gt_iir = I915_READ(GTIIR);
2193 if (gt_iir) {
2194 I915_WRITE(GTIIR, gt_iir);
2195 ret = IRQ_HANDLED;
2196 if (INTEL_INFO(dev)->gen >= 6)
2197 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2198 else
2199 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2200 }
2201
2202 de_iir = I915_READ(DEIIR);
2203 if (de_iir) {
2204 I915_WRITE(DEIIR, de_iir);
2205 ret = IRQ_HANDLED;
2206 if (INTEL_INFO(dev)->gen >= 7)
2207 ivb_display_irq_handler(dev, de_iir);
2208 else
2209 ilk_display_irq_handler(dev, de_iir);
2210 }
2211
2212 if (INTEL_INFO(dev)->gen >= 6) {
2213 u32 pm_iir = I915_READ(GEN6_PMIIR);
2214 if (pm_iir) {
2215 I915_WRITE(GEN6_PMIIR, pm_iir);
2216 ret = IRQ_HANDLED;
2217 gen6_rps_irq_handler(dev_priv, pm_iir);
2218 }
2219 }
2220
2221 I915_WRITE(DEIER, de_ier);
2222 POSTING_READ(DEIER);
2223 if (!HAS_PCH_NOP(dev)) {
2224 I915_WRITE(SDEIER, sde_ier);
2225 POSTING_READ(SDEIER);
2226 }
2227
2228 return ret;
2229 }
2230
2231 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2232 const u32 hpd[HPD_NUM_PINS])
2233 {
2234 struct drm_i915_private *dev_priv = to_i915(dev);
2235 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2236
2237 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2238 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2239
2240 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2241 dig_hotplug_reg, hpd,
2242 bxt_port_hotplug_long_detect);
2243
2244 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2245 }
2246
2247 static irqreturn_t gen8_irq_handler(DRM_IRQ_ARGS)
2248 {
2249 struct drm_device *dev = arg;
2250 struct drm_i915_private *dev_priv = dev->dev_private;
2251 u32 master_ctl;
2252 irqreturn_t ret = IRQ_NONE;
2253 uint32_t tmp = 0;
2254 enum i915_pipe pipe;
2255 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2256
2257 if (!intel_irqs_enabled(dev_priv))
2258 return IRQ_NONE;
2259
2260 if (INTEL_INFO(dev_priv)->gen >= 9)
2261 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2262 GEN9_AUX_CHANNEL_D;
2263
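	/* The master IRQ register does not need forcewake, so the raw _FW
	 * accessors keep the hot interrupt path from taking the uncore
	 * lock or waking the GT. */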
2264 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2265 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2266 if (!master_ctl)
2267 return IRQ_NONE;
2268
2269 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2270
2271 /* Find, clear, then process each source of interrupt */
2272
2273 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2274
2275 if (master_ctl & GEN8_DE_MISC_IRQ) {
2276 tmp = I915_READ(GEN8_DE_MISC_IIR);
2277 if (tmp) {
2278 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2279 ret = IRQ_HANDLED;
2280 if (tmp & GEN8_DE_MISC_GSE)
2281 intel_opregion_asle_intr(dev);
2282 else
2283 DRM_ERROR("Unexpected DE Misc interrupt\n");
2284 } else
2285 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2287 }
2288
2289 if (master_ctl & GEN8_DE_PORT_IRQ) {
2290 tmp = I915_READ(GEN8_DE_PORT_IIR);
2291 if (tmp) {
2292 bool found = false;
2293 u32 hotplug_trigger = 0;
2294
2295 if (IS_BROXTON(dev_priv))
2296 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2297 else if (IS_BROADWELL(dev_priv))
2298 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2299
2300 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2301 ret = IRQ_HANDLED;
2302
2303 if (tmp & aux_mask) {
2304 dp_aux_irq_handler(dev);
2305 found = true;
2306 }
2307
2308 if (hotplug_trigger) {
2309 if (IS_BROXTON(dev))
2310 bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2311 else
2312 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2313 found = true;
2314 }
2315
2316 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2317 gmbus_irq_handler(dev);
2318 found = true;
2319 }
2320
2321 if (!found)
2322 DRM_ERROR("Unexpected DE Port interrupt\n");
2323 } else
2324 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2326 }
2327
2328 for_each_pipe(dev_priv, pipe) {
2329 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2330
2331 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2332 continue;
2333
2334 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2335 if (pipe_iir) {
2336 ret = IRQ_HANDLED;
2337 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2338
2339 if (pipe_iir & GEN8_PIPE_VBLANK &&
2340 intel_pipe_handle_vblank(dev, pipe))
2341 intel_check_page_flip(dev, pipe);
2342
2343 if (INTEL_INFO(dev_priv)->gen >= 9)
2344 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2345 else
2346 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2347
2348 if (flip_done) {
2349 intel_prepare_page_flip(dev, pipe);
2350 intel_finish_page_flip_plane(dev, pipe);
2351 }
2352
2353 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2354 hsw_pipe_crc_irq_handler(dev, pipe);
2355
2356 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2357 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2358 pipe);
2359
2361 if (INTEL_INFO(dev_priv)->gen >= 9)
2362 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2363 else
2364 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2365
2366 if (fault_errors)
2367 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2368 pipe_name(pipe),
2369 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2370 } else
2371 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2372 }
2373
2374 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2375 master_ctl & GEN8_DE_PCH_IRQ) {
2376 /*
2377 * FIXME(BDW): Assume for now that the new interrupt handling
2378 * scheme also closed the SDE interrupt handling race we've seen
2379 * on older pch-split platforms. But this needs testing.
2380 */
2381 u32 pch_iir = I915_READ(SDEIIR);
2382 if (pch_iir) {
2383 I915_WRITE(SDEIIR, pch_iir);
2384 ret = IRQ_HANDLED;
2385
2386 if (HAS_PCH_SPT(dev_priv))
2387 spt_irq_handler(dev, pch_iir);
2388 else
2389 cpt_irq_handler(dev, pch_iir);
2390 } else {
2391 /*
2392 * Like on previous PCH there seems to be something
2393 * fishy going on with forwarding PCH interrupts.
2394 */
2395 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2396 }
2397 }
2398
2399 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2400 POSTING_READ_FW(GEN8_MASTER_IRQ);
2401
2402 return ret;
2403 }
2404
2405 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2406 bool reset_completed)
2407 {
2408 struct intel_engine_cs *ring;
2409 int i;
2410
2411 /*
2412 * Notify all waiters for GPU completion events that reset state has
2413 * been changed, and that they need to restart their wait after
2414 * checking for potential errors (and bail out to drop locks if there is
2415 * a gpu reset pending so that i915_error_work_func can acquire them).
2416 */
2417
2418 assert_spin_locked(&dev_priv->irq_lock);
2419 #ifdef __NetBSD__
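	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */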
2420 for_each_ring(ring, dev_priv, i)
2421 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
2422
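	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */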
2423 spin_lock(&dev_priv->pending_flip_lock);
2424 DRM_SPIN_WAKEUP_ALL(&dev_priv->pending_flip_queue,
2425 &dev_priv->pending_flip_lock);
2426 spin_unlock(&dev_priv->pending_flip_lock);
2427
2428 if (reset_completed) {
2429 spin_lock(&dev_priv->gpu_error.reset_lock);
2430 DRM_SPIN_WAKEUP_ALL(&dev_priv->gpu_error.reset_queue,
2431 &dev_priv->gpu_error.reset_lock);
2432 spin_unlock(&dev_priv->gpu_error.reset_lock);
2433 }
2434 #else
2435 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2436 for_each_ring(ring, dev_priv, i)
2437 wake_up_all(&ring->irq_queue);
2438
2439 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2440 wake_up_all(&dev_priv->pending_flip_queue);
2441
2442 /*
2443 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2444 * reset state is cleared.
2445 */
2446 if (reset_completed)
2447 wake_up_all(&dev_priv->gpu_error.reset_queue);
2448 #endif
2449 }
2450
2451 /**
2452 * i915_reset_and_wakeup - do process context error handling work
2453 * @dev: drm device
2454 *
2455 * Fire an error uevent so userspace can see that a hang or error
2456 * was detected.
2457 */
2458 static void i915_reset_and_wakeup(struct drm_device *dev)
2459 {
2460 struct drm_i915_private *dev_priv = to_i915(dev);
2461 struct i915_gpu_error *error = &dev_priv->gpu_error;
2462 #ifndef __NetBSD__
2463 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2464 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2465 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2466 #endif
2467 int ret;
2468
2469 #ifndef __NetBSD__
2470 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2471 #endif
2472
2473 /*
2474 * Note that there's only one work item which does gpu resets, so we
2475 * need not worry about concurrent gpu resets potentially incrementing
2476 * error->reset_counter twice. We only need to take care of another
2477 * racing irq/hangcheck declaring the gpu dead for a second time. A
2478 * quick check for that is good enough: schedule_work ensures the
2479 * correct ordering between hang detection and this work item, and since
2480 * the reset in-progress bit is only ever set by code outside of this
2481 * work we don't need to worry about any other races.
2482 */
2483 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2484 DRM_DEBUG_DRIVER("resetting chip\n");
2485 #ifndef __NetBSD__
2486 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2487 reset_event);
2488 #endif
2489
2490 /*
2491 * In most cases it's guaranteed that we get here with an RPM
2492 * reference held, for example because there is a pending GPU
2493 * request that won't finish until the reset is done. This
2494 * isn't the case at least when we get here by doing a
2495 * simulated reset via debugfs, so get an RPM reference.
2496 */
2497 intel_runtime_pm_get(dev_priv);
2498
2499 intel_prepare_reset(dev);
2500
2501 /*
2502 * All state reset _must_ be completed before we update the
2503 * reset counter, for otherwise waiters might miss the reset
2504 * pending state and not properly drop locks, resulting in
2505 * deadlocks with the reset work.
2506 */
2507 ret = i915_reset(dev);
2508
2509 intel_finish_reset(dev);
2510
2511 intel_runtime_pm_put(dev_priv);
2512
2513 if (ret == 0) {
2514 /*
2515 * After all the gem state is reset, increment the reset
2516 * counter and wake up everyone waiting for the reset to
2517 * complete.
2518 *
2519 * Since unlock operations are a one-sided barrier only,
2520 * we need to insert a barrier here to order any seqno
2521 * updates before the counter increment.
2523 */
2524 smp_mb__before_atomic();
2525 atomic_inc(&dev_priv->gpu_error.reset_counter);
2526
2527 #ifndef __NetBSD__
2528 kobject_uevent_env(&dev->primary->kdev->kobj,
2529 KOBJ_CHANGE, reset_done_event);
2530 #endif
2531 } else {
2532 atomic_or(I915_WEDGED, &error->reset_counter);
2533 }
2534
2535 /*
2536 * Note: The wake_up also serves as a memory barrier so that
2537 * waiters see the updated value of the reset counter atomic_t.
2538 */
2539 spin_lock(&dev_priv->irq_lock);
2540 i915_error_wake_up(dev_priv, true);
2541 spin_unlock(&dev_priv->irq_lock);
2542 }
2543 }
2544
2545 static void i915_report_and_clear_eir(struct drm_device *dev)
2546 {
2547 struct drm_i915_private *dev_priv = dev->dev_private;
2548 uint32_t instdone[I915_NUM_INSTDONE_REG];
2549 u32 eir = I915_READ(EIR);
2550 int pipe, i;
2551
2552 if (!eir)
2553 return;
2554
2555 pr_err("render error detected, EIR: 0x%08x\n", eir);
2556
2557 i915_get_extra_instdone(dev, instdone);
2558
2559 if (IS_G4X(dev)) {
2560 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2561 u32 ipeir = I915_READ(IPEIR_I965);
2562
2563 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2564 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2565 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2566 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2567 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2568 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2569 I915_WRITE(IPEIR_I965, ipeir);
2570 POSTING_READ(IPEIR_I965);
2571 }
2572 if (eir & GM45_ERROR_PAGE_TABLE) {
2573 u32 pgtbl_err = I915_READ(PGTBL_ER);
2574 pr_err("page table error\n");
2575 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2576 I915_WRITE(PGTBL_ER, pgtbl_err);
2577 POSTING_READ(PGTBL_ER);
2578 }
2579 }
2580
2581 if (!IS_GEN2(dev)) {
2582 if (eir & I915_ERROR_PAGE_TABLE) {
2583 u32 pgtbl_err = I915_READ(PGTBL_ER);
2584 pr_err("page table error\n");
2585 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2586 I915_WRITE(PGTBL_ER, pgtbl_err);
2587 POSTING_READ(PGTBL_ER);
2588 }
2589 }
2590
2591 if (eir & I915_ERROR_MEMORY_REFRESH) {
2592 pr_err("memory refresh error:\n");
2593 for_each_pipe(dev_priv, pipe)
2594 pr_err("pipe %c stat: 0x%08x\n",
2595 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2596 /* pipestat has already been acked */
2597 }
2598 if (eir & I915_ERROR_INSTRUCTION) {
2599 pr_err("instruction error\n");
2600 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2601 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2602 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2603 if (INTEL_INFO(dev)->gen < 4) {
2604 u32 ipeir = I915_READ(IPEIR);
2605
2606 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2607 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2608 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2609 I915_WRITE(IPEIR, ipeir);
2610 POSTING_READ(IPEIR);
2611 } else {
2612 u32 ipeir = I915_READ(IPEIR_I965);
2613
2614 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2615 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2616 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2617 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2618 I915_WRITE(IPEIR_I965, ipeir);
2619 POSTING_READ(IPEIR_I965);
2620 }
2621 }
2622
2623 I915_WRITE(EIR, eir);
2624 POSTING_READ(EIR);
2625 eir = I915_READ(EIR);
2626 if (eir) {
2627 /*
2628 * some errors might have become stuck,
2629 * mask them.
2630 */
2631 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2632 I915_WRITE(EMR, I915_READ(EMR) | eir);
2633 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2634 }
2635 }
2636
2637 /**
2638 * i915_handle_error - handle a gpu error
2639 * @dev: drm device
2640 *
2641 * Do some basic checking of register state at error time and
2642 * dump it to the syslog. Also call i915_capture_error_state() to make
2643 * sure we get a record and make it available in debugfs. Fire a uevent
2644 * so userspace knows something bad happened (should trigger collection
2645 * of a ring dump etc.).
2646 */
2647 void i915_handle_error(struct drm_device *dev, bool wedged,
2648 const char *fmt, ...)
2649 {
2650 struct drm_i915_private *dev_priv = dev->dev_private;
2651 va_list args;
2652 char error_msg[80];
2653
2654 assert_spin_locked(&dev_priv->irq_lock);
2655
2656 va_start(args, fmt);
2657 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2658 va_end(args);
2659
2660 i915_capture_error_state(dev, wedged, error_msg);
2661 i915_report_and_clear_eir(dev);
2662
2663 if (wedged) {
2664 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2665 &dev_priv->gpu_error.reset_counter);
2666
2667 /*
2668 * Wakeup waiting processes so that the reset function
2669 * i915_reset_and_wakeup doesn't deadlock trying to grab
2670 * various locks. By bumping the reset counter first, the woken
2671 * processes will see a reset in progress and back off,
2672 * releasing their locks and then wait for the reset completion.
2673 * We must do this for _all_ gpu waiters that might hold locks
2674 * that the reset work needs to acquire.
2675 *
2676 * Note: The wake_up serves as the required memory barrier to
2677 * ensure that the waiters see the updated value of the reset
2678 * counter atomic_t.
2679 */
2680 i915_error_wake_up(dev_priv, false);
2681 }
2682
2683 i915_reset_and_wakeup(dev);
2684 }
2685
2686 /* Called from drm generic code, passed 'crtc' which
2687 * we use as a pipe index
2688 */
2689 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2690 {
2691 struct drm_i915_private *dev_priv = dev->dev_private;
2692 unsigned long irqflags;
2693
2694 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2695 if (INTEL_INFO(dev)->gen >= 4)
2696 i915_enable_pipestat(dev_priv, pipe,
2697 PIPE_START_VBLANK_INTERRUPT_STATUS);
2698 else
2699 i915_enable_pipestat(dev_priv, pipe,
2700 PIPE_VBLANK_INTERRUPT_STATUS);
2701 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2702
2703 return 0;
2704 }
2705
2706 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2707 {
2708 struct drm_i915_private *dev_priv = dev->dev_private;
2709 unsigned long irqflags;
2710 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2711 DE_PIPE_VBLANK(pipe);
2712
2713 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2714 ironlake_enable_display_irq(dev_priv, bit);
2715 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2716
2717 return 0;
2718 }
2719
2720 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2721 {
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2723 unsigned long irqflags;
2724
2725 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726 i915_enable_pipestat(dev_priv, pipe,
2727 PIPE_START_VBLANK_INTERRUPT_STATUS);
2728 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2729
2730 return 0;
2731 }
2732
2733 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2734 {
2735 struct drm_i915_private *dev_priv = dev->dev_private;
2736 unsigned long irqflags;
2737
2738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2739 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2740 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2741 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2743 return 0;
2744 }
2745
2746 /* Called from drm generic code, passed 'crtc' which
2747 * we use as a pipe index
2748 */
2749 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2750 {
2751 struct drm_i915_private *dev_priv = dev->dev_private;
2752 unsigned long irqflags;
2753
2754 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2755 i915_disable_pipestat(dev_priv, pipe,
2756 PIPE_VBLANK_INTERRUPT_STATUS |
2757 PIPE_START_VBLANK_INTERRUPT_STATUS);
2758 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2759 }
2760
2761 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2762 {
2763 struct drm_i915_private *dev_priv = dev->dev_private;
2764 unsigned long irqflags;
2765 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2766 DE_PIPE_VBLANK(pipe);
2767
2768 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2769 ironlake_disable_display_irq(dev_priv, bit);
2770 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2771 }
2772
2773 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2774 {
2775 struct drm_i915_private *dev_priv = dev->dev_private;
2776 unsigned long irqflags;
2777
2778 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2779 i915_disable_pipestat(dev_priv, pipe,
2780 PIPE_START_VBLANK_INTERRUPT_STATUS);
2781 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2782 }
2783
2784 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2785 {
2786 struct drm_i915_private *dev_priv = dev->dev_private;
2787 unsigned long irqflags;
2788
2789 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2790 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2791 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2792 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2793 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2794 }
2795
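/*
 * A ring is considered idle when it has no outstanding requests, or when
 * the hardware seqno has already passed the last submitted seqno.
 */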
2796 static bool
2797 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2798 {
2799 return (list_empty(&ring->request_list) ||
2800 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2801 }
2802
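/*
 * Decode IPEHR to see whether the ring is stalled on a semaphore wait:
 * gen8+ uses MI_SEMAPHORE_WAIT (opcode field 0x1c), earlier gens use the
 * MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER form.
 */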
2803 static bool
2804 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2805 {
2806 if (INTEL_INFO(dev)->gen >= 8) {
2807 return (ipehr >> 23) == 0x1c;
2808 } else {
2809 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2810 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2811 MI_SEMAPHORE_REGISTER);
2812 }
2813 }
2814
2815 static struct intel_engine_cs *
2816 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2817 {
2818 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2819 struct intel_engine_cs *signaller;
2820 int i;
2821
2822 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2823 for_each_ring(signaller, dev_priv, i) {
2824 if (ring == signaller)
2825 continue;
2826
2827 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2828 return signaller;
2829 }
2830 } else {
2831 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2832
2833 for_each_ring(signaller, dev_priv, i) {
2834 if (ring == signaller)
2835 continue;
2836
2837 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2838 return signaller;
2839 }
2840 }
2841
2842 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2843 ring->id, ipehr, offset);
2844
2845 return NULL;
2846 }
2847
2848 static struct intel_engine_cs *
2849 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2850 {
2851 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2852 u32 cmd, ipehr, head;
2853 u64 offset = 0;
2854 int i, backwards;
2855
2856 /*
2857 * This function does not support execlist mode - any attempt to
2858 * proceed further into this function will result in a kernel panic
2859 * when dereferencing ring->buffer, which is not set up in execlist
2860 * mode.
2861 *
2862 * The correct way of doing it would be to derive the currently
2863 * executing ring buffer from the current context, which is derived
2864 * from the currently running request. Unfortunately, to get the
2865 * current request we would have to grab the struct_mutex before doing
2866 * anything else, which would be ill-advised since some other thread
2867 * might have grabbed it already and managed to hang itself, causing
2868 * the hang checker to deadlock.
2869 *
2870 * Therefore, this function does not support execlist mode in its
2871 * current form. Just return NULL and move on.
2872 */
2873 if (ring->buffer == NULL)
2874 return NULL;
2875
2876 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2877 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2878 return NULL;
2879
2880 /*
2881 * HEAD is likely pointing to the dword after the actual command,
2882 * so scan backwards until we find the MBOX. But limit it to just 3
2883 * or 4 dwords depending on the semaphore wait command size.
2884 * Note that we don't care about ACTHD here since that might
2885 * point at the batch, and semaphores are always emitted into the
2886 * ringbuffer itself.
2887 */
2888 head = I915_READ_HEAD(ring) & HEAD_ADDR;
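	/* The gen8+ MI_SEMAPHORE_WAIT command is one dword longer (it
	 * carries a 64-bit semaphore address), hence the extra step back. */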
2889 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2890
2891 for (i = backwards; i; --i) {
2892 /*
2893 * Be paranoid and presume the hw has gone off into the wild -
2894 * our ring is smaller than what the hardware (and hence
2895 * HEAD_ADDR) allows. Also handles wrap-around.
2896 */
2897 head &= ring->buffer->size - 1;
2898
2899 /* This here seems to blow up */
2900 #ifdef __NetBSD__
2901 cmd = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2902 head);
2903 #else
2904 cmd = ioread32(ring->buffer->virtual_start + head);
2905 #endif
2906 if (cmd == ipehr)
2907 break;
2908
2909 head -= 4;
2910 }
2911
2912 if (!i)
2913 return NULL;
2914
2915 #ifdef __NetBSD__
2916 *seqno = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2917 head + 4) + 1;
2918 #else
2919 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2920 #endif
2921 if (INTEL_INFO(ring->dev)->gen >= 8) {
2922 #ifdef __NetBSD__
2923 offset = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2924 head + 12);
2925 offset <<= 32;
2926 offset |= bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2927 head + 8);
2928 #else
2929 offset = ioread32(ring->buffer->virtual_start + head + 12);
2930 offset <<= 32;
2931 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2932 #endif
2933 }
2934 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2935 }
2936
2937 static int semaphore_passed(struct intel_engine_cs *ring)
2938 {
2939 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2940 struct intel_engine_cs *signaller;
2941 u32 seqno;
2942
2943 ring->hangcheck.deadlock++;
2944
2945 signaller = semaphore_waits_for(ring, &seqno);
2946 if (signaller == NULL)
2947 return -1;
2948
2949 /* Prevent pathological recursion due to driver bugs */
2950 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2951 return -1;
2952
2953 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2954 return 1;
2955
2956 /* cursory check for an unkickable deadlock */
2957 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2958 semaphore_passed(signaller) < 0)
2959 return -1;
2960
2961 return 0;
2962 }
2963
2964 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2965 {
2966 struct intel_engine_cs *ring;
2967 int i;
2968
2969 for_each_ring(ring, dev_priv, i)
2970 ring->hangcheck.deadlock = 0;
2971 }
2972
2973 static enum intel_ring_hangcheck_action
2974 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2975 {
2976 struct drm_device *dev = ring->dev;
2977 struct drm_i915_private *dev_priv = dev->dev_private;
2978 u32 tmp;
2979
2980 if (acthd != ring->hangcheck.acthd) {
2981 if (acthd > ring->hangcheck.max_acthd) {
2982 ring->hangcheck.max_acthd = acthd;
2983 return HANGCHECK_ACTIVE;
2984 }
2985
2986 return HANGCHECK_ACTIVE_LOOP;
2987 }
2988
2989 if (IS_GEN2(dev))
2990 return HANGCHECK_HUNG;
2991
2992 /* Is the chip hanging on a WAIT_FOR_EVENT?
2993 * If so we can simply poke the RB_WAIT bit
2994 * and break the hang. This should work on
2995 * all but the second generation chipsets.
2996 */
2997 tmp = I915_READ_CTL(ring);
2998 if (tmp & RING_WAIT) {
2999 i915_handle_error(dev, false,
3000 "Kicking stuck wait on %s",
3001 ring->name);
3002 I915_WRITE_CTL(ring, tmp);
3003 return HANGCHECK_KICK;
3004 }
3005
3006 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3007 switch (semaphore_passed(ring)) {
3008 default:
3009 return HANGCHECK_HUNG;
3010 case 1:
3011 i915_handle_error(dev, false,
3012 "Kicking stuck semaphore on %s",
3013 ring->name);
3014 I915_WRITE_CTL(ring, tmp);
3015 return HANGCHECK_KICK;
3016 case 0:
3017 return HANGCHECK_WAIT;
3018 }
3019 }
3020
3021 return HANGCHECK_HUNG;
3022 }
3023
3024 /*
3025 * This is called when the chip hasn't reported back with completed
3026 * batchbuffers in a long time. We keep track of per-ring seqno progress and
3027 * if there is no progress, the hangcheck score for that ring is increased.
3028 * Further, acthd is inspected to see if the ring is stuck. If it is, we
3029 * kick the ring. If we see no progress on three subsequent calls
3030 * we assume the chip is wedged and try to fix it by resetting it.
3031 */
3032 static void i915_hangcheck_elapsed(struct work_struct *work)
3033 {
3034 struct drm_i915_private *dev_priv =
3035 container_of(work, typeof(*dev_priv),
3036 gpu_error.hangcheck_work.work);
3037 struct drm_device *dev = dev_priv->dev;
3038 struct intel_engine_cs *ring;
3039 int i;
3040 int busy_count = 0, rings_hung = 0;
3041 bool stuck[I915_NUM_RINGS] = { 0 };
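	/* Per-call score increments: BUSY for a ring that is making no
	 * seqno progress (or an idle ring that still has waiters), KICK
	 * when a stuck wait or semaphore is kicked, and HUNG when the ring
	 * is judged hung; a reset is requested once any ring's score
	 * reaches HANGCHECK_SCORE_RING_HUNG (checked after the loop). */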
3042 #define BUSY 1
3043 #define KICK 5
3044 #define HUNG 20
3045
3046 if (!i915.enable_hangcheck)
3047 return;
3048
3049 spin_lock(&dev_priv->irq_lock);
3050
3051 for_each_ring(ring, dev_priv, i) {
3052 u64 acthd;
3053 u32 seqno;
3054 bool busy = true;
3055
3056 semaphore_clear_deadlocks(dev_priv);
3057
3058 seqno = ring->get_seqno(ring, false);
3059 acthd = intel_ring_get_active_head(ring);
3060
3061 if (ring->hangcheck.seqno == seqno) {
3062 if (ring_idle(ring, seqno)) {
3063 ring->hangcheck.action = HANGCHECK_IDLE;
3064 #ifdef __NetBSD__
3065 if (DRM_SPIN_WAITERS_P(&ring->irq_queue,
3066 &dev_priv->irq_lock)) {
3067 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3068 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3069 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3070 ring->name);
3071 else
3072 DRM_INFO("Fake missed irq on %s\n",
3073 ring->name);
3074 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
3075 }
3076 ring->hangcheck.score += BUSY;
3077 } else {
3078 busy = false;
3079 }
3080 #else
3081 if (waitqueue_active(&ring->irq_queue)) {
3082 /* Issue a wake-up to catch stuck h/w. */
3083 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3084 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3085 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3086 ring->name);
3087 else
3088 DRM_INFO("Fake missed irq on %s\n",
3089 ring->name);
3090 wake_up_all(&ring->irq_queue);
3091 }
3092 /* Safeguard against driver failure */
3093 ring->hangcheck.score += BUSY;
3094 } else
3095 busy = false;
3096 #endif
3097 } else {
3098 /* We always increment the hangcheck score
3099 * if the ring is busy and still processing
3100 * the same request, so that no single request
3101 * can run indefinitely (such as a chain of
3102 * batches). The only time we do not increment
3103 * the hangcheck score on this ring is if this
3104 * ring is in a legitimate wait for another
3105 * ring. In that case the waiting ring is a
3106 * victim and we want to be sure we catch the
3107 * right culprit. Then every time we do kick
3108 * the ring, add a small increment to the
3109 * score so that we can catch a batch that is
3110 * being repeatedly kicked and so responsible
3111 * for stalling the machine.
3112 */
3113 ring->hangcheck.action = ring_stuck(ring,
3114 acthd);
3115
3116 switch (ring->hangcheck.action) {
3117 case HANGCHECK_IDLE:
3118 case HANGCHECK_WAIT:
3119 case HANGCHECK_ACTIVE:
3120 break;
3121 case HANGCHECK_ACTIVE_LOOP:
3122 ring->hangcheck.score += BUSY;
3123 break;
3124 case HANGCHECK_KICK:
3125 ring->hangcheck.score += KICK;
3126 break;
3127 case HANGCHECK_HUNG:
3128 ring->hangcheck.score += HUNG;
3129 stuck[i] = true;
3130 break;
3131 }
3132 }
3133 } else {
3134 ring->hangcheck.action = HANGCHECK_ACTIVE;
3135
3136 /* Gradually reduce the count so that we catch DoS
3137 * attempts across multiple batches.
3138 */
3139 if (ring->hangcheck.score > 0)
3140 ring->hangcheck.score--;
3141
3142 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3143 }
3144
3145 ring->hangcheck.seqno = seqno;
3146 ring->hangcheck.acthd = acthd;
3147 busy_count += busy;
3148 }
3149
3150 for_each_ring(ring, dev_priv, i) {
3151 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3152 DRM_INFO("%s on %s\n",
3153 stuck[i] ? "stuck" : "no progress",
3154 ring->name);
3155 rings_hung++;
3156 }
3157 }
3158
3159 if (rings_hung) {
3160 i915_handle_error(dev, true, "Ring hung");
3161 spin_unlock(&dev_priv->irq_lock);
3162 return;
3163 }
3164
3165 spin_unlock(&dev_priv->irq_lock);
3166
3167 if (busy_count)
3168 /* Reset the timer in case the chip hangs without another request
3169 * being added */
3170 i915_queue_hangcheck(dev);
3171 }
3172
3173 void i915_queue_hangcheck(struct drm_device *dev)
3174 {
3175 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3176
3177 if (!i915.enable_hangcheck)
3178 return;
3179
3180 /* Don't continually defer the hangcheck so that it is always run at
3181 * least once after work has been scheduled on any ring. Otherwise,
3182 * we will ignore a hung ring if a second ring is kept busy.
3183 */
3184
3185 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3186 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3187 }
3188
3189 static void ibx_irq_reset(struct drm_device *dev)
3190 {
3191 struct drm_i915_private *dev_priv = dev->dev_private;
3192
3193 if (HAS_PCH_NOP(dev))
3194 return;
3195
3196 GEN5_IRQ_RESET(SDE);
3197
3198 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3199 I915_WRITE(SERR_INT, 0xffffffff);
3200 }
3201
3202 /*
3203 * SDEIER is also touched by the interrupt handler to work around missed PCH
3204 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3205 * instead we unconditionally enable all PCH interrupt sources here, but then
3206 * only unmask them as needed with SDEIMR.
3207 *
3208 * This function needs to be called before interrupts are enabled.
3209 */
3210 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3211 {
3212 struct drm_i915_private *dev_priv = dev->dev_private;
3213
3214 if (HAS_PCH_NOP(dev))
3215 return;
3216
3217 WARN_ON(I915_READ(SDEIER) != 0);
3218 I915_WRITE(SDEIER, 0xffffffff);
3219 POSTING_READ(SDEIER);
3220 }
3221
3222 static void gen5_gt_irq_reset(struct drm_device *dev)
3223 {
3224 struct drm_i915_private *dev_priv = dev->dev_private;
3225
3226 GEN5_IRQ_RESET(GT);
3227 if (INTEL_INFO(dev)->gen >= 6)
3228 GEN5_IRQ_RESET(GEN6_PM);
3229 }
3230
3231 /* drm_dma.h hooks
3232 */
3233 static void ironlake_irq_reset(struct drm_device *dev)
3234 {
3235 struct drm_i915_private *dev_priv = dev->dev_private;
3236
3237 I915_WRITE(HWSTAM, 0xffffffff);
3238
3239 GEN5_IRQ_RESET(DE);
3240 if (IS_GEN7(dev))
3241 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3242
3243 gen5_gt_irq_reset(dev);
3244
3245 ibx_irq_reset(dev);
3246 }
3247
3248 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3249 {
3250 enum pipe pipe;
3251
3252 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3253 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3254
3255 for_each_pipe(dev_priv, pipe)
3256 I915_WRITE(PIPESTAT(pipe), 0xffff);
3257
3258 GEN5_IRQ_RESET(VLV_);
3259 }
3260
3261 static void valleyview_irq_preinstall(struct drm_device *dev)
3262 {
3263 struct drm_i915_private *dev_priv = dev->dev_private;
3264
3265 /* VLV magic */
3266 I915_WRITE(VLV_IMR, 0);
3267 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3268 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3269 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3270
3271 gen5_gt_irq_reset(dev);
3272
3273 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3274
3275 vlv_display_irq_reset(dev_priv);
3276 }
3277
3278 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3279 {
3280 GEN8_IRQ_RESET_NDX(GT, 0);
3281 GEN8_IRQ_RESET_NDX(GT, 1);
3282 GEN8_IRQ_RESET_NDX(GT, 2);
3283 GEN8_IRQ_RESET_NDX(GT, 3);
3284 }
3285
3286 static void gen8_irq_reset(struct drm_device *dev)
3287 {
3288 struct drm_i915_private *dev_priv = dev->dev_private;
3289 int pipe;
3290
3291 I915_WRITE(GEN8_MASTER_IRQ, 0);
3292 POSTING_READ(GEN8_MASTER_IRQ);
3293
3294 gen8_gt_irq_reset(dev_priv);
3295
3296 for_each_pipe(dev_priv, pipe)
3297 if (intel_display_power_is_enabled(dev_priv,
3298 POWER_DOMAIN_PIPE(pipe)))
3299 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3300
3301 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3302 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3303 GEN5_IRQ_RESET(GEN8_PCU_);
3304
3305 if (HAS_PCH_SPLIT(dev))
3306 ibx_irq_reset(dev);
3307 }
3308
3309 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3310 unsigned int pipe_mask)
3311 {
3312 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3313
3314 spin_lock_irq(&dev_priv->irq_lock);
3315 if (pipe_mask & 1 << PIPE_A)
3316 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3317 dev_priv->de_irq_mask[PIPE_A],
3318 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3319 if (pipe_mask & 1 << PIPE_B)
3320 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3321 dev_priv->de_irq_mask[PIPE_B],
3322 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3323 if (pipe_mask & 1 << PIPE_C)
3324 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3325 dev_priv->de_irq_mask[PIPE_C],
3326 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3327 spin_unlock_irq(&dev_priv->irq_lock);
3328 }
3329
3330 static void cherryview_irq_preinstall(struct drm_device *dev)
3331 {
3332 struct drm_i915_private *dev_priv = dev->dev_private;
3333
3334 I915_WRITE(GEN8_MASTER_IRQ, 0);
3335 POSTING_READ(GEN8_MASTER_IRQ);
3336
3337 gen8_gt_irq_reset(dev_priv);
3338
3339 GEN5_IRQ_RESET(GEN8_PCU_);
3340
3341 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3342
3343 vlv_display_irq_reset(dev_priv);
3344 }
3345
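/*
 * Build the mask of hotplug interrupt bits for every encoder whose HPD pin
 * is currently enabled, using the platform's pin-to-bit table.
 */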
3346 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3347 const u32 hpd[HPD_NUM_PINS])
3348 {
3349 struct drm_i915_private *dev_priv = to_i915(dev);
3350 struct intel_encoder *encoder;
3351 u32 enabled_irqs = 0;
3352
3353 for_each_intel_encoder(dev, encoder)
3354 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3355 enabled_irqs |= hpd[encoder->hpd_pin];
3356
3357 return enabled_irqs;
3358 }
3359
3360 static void ibx_hpd_irq_setup(struct drm_device *dev)
3361 {
3362 struct drm_i915_private *dev_priv = dev->dev_private;
3363 u32 hotplug_irqs, hotplug, enabled_irqs;
3364
3365 if (HAS_PCH_IBX(dev)) {
3366 hotplug_irqs = SDE_HOTPLUG_MASK;
3367 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3368 } else {
3369 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3370 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3371 }
3372
3373 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3374
3375 /*
3376 * Enable digital hotplug on the PCH, and configure the DP short pulse
3377 * duration to 2ms (which is the minimum in the Display Port spec).
3378 * The pulse duration bits are reserved on LPT+.
3379 */
3380 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3381 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3382 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3383 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3384 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3385 /*
3386 * When CPU and PCH are on the same package, port A
3387 * HPD must be enabled in both north and south.
3388 */
3389 if (HAS_PCH_LPT_LP(dev))
3390 hotplug |= PORTA_HOTPLUG_ENABLE;
3391 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3392 }
3393
3394 static void spt_hpd_irq_setup(struct drm_device *dev)
3395 {
3396 struct drm_i915_private *dev_priv = dev->dev_private;
3397 u32 hotplug_irqs, hotplug, enabled_irqs;
3398
3399 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3400 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3401
3402 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3403
3404 /* Enable digital hotplug on the PCH */
3405 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3406 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3407 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3408 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3409
3410 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3411 hotplug |= PORTE_HOTPLUG_ENABLE;
3412 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3413 }
3414
3415 static void ilk_hpd_irq_setup(struct drm_device *dev)
3416 {
3417 struct drm_i915_private *dev_priv = dev->dev_private;
3418 u32 hotplug_irqs, hotplug, enabled_irqs;
3419
3420 if (INTEL_INFO(dev)->gen >= 8) {
3421 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3422 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3423
3424 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3425 } else if (INTEL_INFO(dev)->gen >= 7) {
3426 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3427 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3428
3429 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3430 } else {
3431 hotplug_irqs = DE_DP_A_HOTPLUG;
3432 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3433
3434 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3435 }
3436
3437 /*
3438 * Enable digital hotplug on the CPU, and configure the DP short pulse
3439 * duration to 2ms (which is the minimum in the Display Port spec)
3440 * The pulse duration bits are reserved on HSW+.
3441 */
3442 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3443 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3444 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3445 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3446
3447 ibx_hpd_irq_setup(dev);
3448 }
3449
3450 static void bxt_hpd_irq_setup(struct drm_device *dev)
3451 {
3452 struct drm_i915_private *dev_priv = dev->dev_private;
3453 u32 hotplug_irqs, hotplug, enabled_irqs;
3454
3455 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3456 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3457
3458 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3459
3460 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3461 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3462 PORTA_HOTPLUG_ENABLE;
3463 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3464 }
3465
3466 static void ibx_irq_postinstall(struct drm_device *dev)
3467 {
3468 struct drm_i915_private *dev_priv = dev->dev_private;
3469 u32 mask;
3470
3471 if (HAS_PCH_NOP(dev))
3472 return;
3473
3474 if (HAS_PCH_IBX(dev))
3475 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3476 else
3477 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3478
3479 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3480 I915_WRITE(SDEIMR, ~mask);
3481 }
3482
3483 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3484 {
3485 struct drm_i915_private *dev_priv = dev->dev_private;
3486 u32 pm_irqs, gt_irqs;
3487
3488 pm_irqs = gt_irqs = 0;
3489
3490 dev_priv->gt_irq_mask = ~0;
3491 if (HAS_L3_DPF(dev)) {
3492 /* L3 parity interrupt is always unmasked. */
3493 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3494 gt_irqs |= GT_PARITY_ERROR(dev);
3495 }
3496
3497 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3498 if (IS_GEN5(dev)) {
3499 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3500 ILK_BSD_USER_INTERRUPT;
3501 } else {
3502 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3503 }
3504
3505 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3506
3507 if (INTEL_INFO(dev)->gen >= 6) {
3508 /*
3509 * RPS interrupts will get enabled/disabled on demand when RPS
3510 * itself is enabled/disabled.
3511 */
3512 if (HAS_VEBOX(dev))
3513 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3514
3515 dev_priv->pm_irq_mask = 0xffffffff;
3516 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3517 }
3518 }
3519
3520 static int ironlake_irq_postinstall(struct drm_device *dev)
3521 {
3522 struct drm_i915_private *dev_priv = dev->dev_private;
3523 u32 display_mask, extra_mask;
3524
3525 if (INTEL_INFO(dev)->gen >= 7) {
3526 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3527 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3528 DE_PLANEB_FLIP_DONE_IVB |
3529 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3530 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3531 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3532 DE_DP_A_HOTPLUG_IVB);
3533 } else {
3534 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3535 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3536 DE_AUX_CHANNEL_A |
3537 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3538 DE_POISON);
3539 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3540 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3541 DE_DP_A_HOTPLUG);
3542 }
3543
3544 dev_priv->irq_mask = ~display_mask;
3545
3546 I915_WRITE(HWSTAM, 0xeffe);
3547
3548 ibx_irq_pre_postinstall(dev);
3549
3550 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3551
3552 gen5_gt_irq_postinstall(dev);
3553
3554 ibx_irq_postinstall(dev);
3555
3556 if (IS_IRONLAKE_M(dev)) {
3557 /* Enable PCU event interrupts
3558 *
3559 * spinlocking not required here for correctness since interrupt
3560 * setup is guaranteed to run in single-threaded context. But we
3561 * need it to make the assert_spin_locked happy. */
3562 spin_lock_irq(&dev_priv->irq_lock);
3563 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3564 spin_unlock_irq(&dev_priv->irq_lock);
3565 }
3566
3567 return 0;
3568 }
3569
3570 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3571 {
3572 u32 pipestat_mask;
3573 u32 iir_mask;
3574 enum pipe pipe;
3575
3576 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3577 PIPE_FIFO_UNDERRUN_STATUS;
3578
3579 for_each_pipe(dev_priv, pipe)
3580 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3581 POSTING_READ(PIPESTAT(PIPE_A));
3582
3583 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3584 PIPE_CRC_DONE_INTERRUPT_STATUS;
3585
3586 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3587 for_each_pipe(dev_priv, pipe)
3588 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3589
3590 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3591 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3592 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3593 if (IS_CHERRYVIEW(dev_priv))
3594 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3595 dev_priv->irq_mask &= ~iir_mask;
3596
3597 I915_WRITE(VLV_IIR, iir_mask);
3598 I915_WRITE(VLV_IIR, iir_mask);
3599 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3600 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3601 POSTING_READ(VLV_IMR);
3602 }
3603
3604 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3605 {
3606 u32 pipestat_mask;
3607 u32 iir_mask;
3608 enum pipe pipe;
3609
3610 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3611 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3612 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3613 if (IS_CHERRYVIEW(dev_priv))
3614 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3615
3616 dev_priv->irq_mask |= iir_mask;
3617 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3618 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3619 I915_WRITE(VLV_IIR, iir_mask);
3620 I915_WRITE(VLV_IIR, iir_mask);
3621 POSTING_READ(VLV_IIR);
3622
3623 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3624 PIPE_CRC_DONE_INTERRUPT_STATUS;
3625
3626 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3627 for_each_pipe(dev_priv, pipe)
3628 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3629
3630 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3631 PIPE_FIFO_UNDERRUN_STATUS;
3632
3633 for_each_pipe(dev_priv, pipe)
3634 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3635 POSTING_READ(PIPESTAT(PIPE_A));
3636 }
3637
3638 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3639 {
3640 assert_spin_locked(&dev_priv->irq_lock);
3641
3642 if (dev_priv->display_irqs_enabled)
3643 return;
3644
3645 dev_priv->display_irqs_enabled = true;
3646
3647 if (intel_irqs_enabled(dev_priv))
3648 valleyview_display_irqs_install(dev_priv);
3649 }
3650
3651 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3652 {
3653 assert_spin_locked(&dev_priv->irq_lock);
3654
3655 if (!dev_priv->display_irqs_enabled)
3656 return;
3657
3658 dev_priv->display_irqs_enabled = false;
3659
3660 if (intel_irqs_enabled(dev_priv))
3661 valleyview_display_irqs_uninstall(dev_priv);
3662 }
3663
3664 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3665 {
3666 dev_priv->irq_mask = ~0;
3667
3668 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3669 POSTING_READ(PORT_HOTPLUG_EN);
3670
3671 I915_WRITE(VLV_IIR, 0xffffffff);
3672 I915_WRITE(VLV_IIR, 0xffffffff);
3673 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3674 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3675 POSTING_READ(VLV_IMR);
3676
3677 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3678 	 * just to make the assert_spin_locked() check happy. */
3679 spin_lock_irq(&dev_priv->irq_lock);
3680 if (dev_priv->display_irqs_enabled)
3681 valleyview_display_irqs_install(dev_priv);
3682 spin_unlock_irq(&dev_priv->irq_lock);
3683 }
3684
3685 static int valleyview_irq_postinstall(struct drm_device *dev)
3686 {
3687 struct drm_i915_private *dev_priv = dev->dev_private;
3688
3689 vlv_display_irq_postinstall(dev_priv);
3690
3691 gen5_gt_irq_postinstall(dev);
3692
3693 /* ack & enable invalid PTE error interrupts */
3694 #if 0 /* FIXME: add support to irq handler for checking these bits */
3695 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3696 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3697 #endif
3698
3699 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3700
3701 return 0;
3702 }
3703
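/*
 * GT interrupts on gen8+ are split across four register banks:
 * 0 = render/blitter (RCS/BCS), 1 = video (VCS1/VCS2), 2 = PM/RPS,
 * 3 = video enhancement (VECS). Bank 2 starts fully masked; RPS
 * interrupts are enabled on demand (see the comment below).
 */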
3704 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3705 {
3706 /* These are interrupts we'll toggle with the ring mask register */
3707 uint32_t gt_interrupts[] = {
3708 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3709 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3710 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3711 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3712 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3713 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3714 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3715 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3716 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3717 0,
3718 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3719 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3720 };
3721
3722 dev_priv->pm_irq_mask = 0xffffffff;
3723 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3724 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3725 /*
3726 * RPS interrupts will get enabled/disabled on demand when RPS itself
3727 * is enabled/disabled.
3728 */
3729 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3730 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3731 }
3732
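/*
 * Display engine (DE) interrupts on gen8+: build the per-pipe and
 * DE port masks (AUX channels, GMBUS on BXT, hotplug on BXT/BDW),
 * then program each powered-up pipe and the DE port register.
 */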
3733 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3734 {
3735 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3736 uint32_t de_pipe_enables;
3737 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3738 u32 de_port_enables;
3739 enum pipe pipe;
3740
3741 if (INTEL_INFO(dev_priv)->gen >= 9) {
3742 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3743 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3744 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3745 GEN9_AUX_CHANNEL_D;
3746 if (IS_BROXTON(dev_priv))
3747 de_port_masked |= BXT_DE_PORT_GMBUS;
3748 } else {
3749 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3750 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3751 }
3752
3753 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3754 GEN8_PIPE_FIFO_UNDERRUN;
3755
3756 de_port_enables = de_port_masked;
3757 if (IS_BROXTON(dev_priv))
3758 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3759 else if (IS_BROADWELL(dev_priv))
3760 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3761
3762 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3763 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3764 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3765
3766 for_each_pipe(dev_priv, pipe)
3767 if (intel_display_power_is_enabled(dev_priv,
3768 POWER_DOMAIN_PIPE(pipe)))
3769 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3770 dev_priv->de_irq_mask[pipe],
3771 de_pipe_enables);
3772
3773 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3774 }
3775
3776 static int gen8_irq_postinstall(struct drm_device *dev)
3777 {
3778 struct drm_i915_private *dev_priv = dev->dev_private;
3779
3780 if (HAS_PCH_SPLIT(dev))
3781 ibx_irq_pre_postinstall(dev);
3782
3783 gen8_gt_irq_postinstall(dev_priv);
3784 gen8_de_irq_postinstall(dev_priv);
3785
3786 if (HAS_PCH_SPLIT(dev))
3787 ibx_irq_postinstall(dev);
3788
3789 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3790 POSTING_READ(GEN8_MASTER_IRQ);
3791
3792 return 0;
3793 }
3794
3795 static int cherryview_irq_postinstall(struct drm_device *dev)
3796 {
3797 struct drm_i915_private *dev_priv = dev->dev_private;
3798
3799 vlv_display_irq_postinstall(dev_priv);
3800
3801 gen8_gt_irq_postinstall(dev_priv);
3802
3803 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3804 POSTING_READ(GEN8_MASTER_IRQ);
3805
3806 return 0;
3807 }
3808
3809 static void gen8_irq_uninstall(struct drm_device *dev)
3810 {
3811 struct drm_i915_private *dev_priv = dev->dev_private;
3812
3813 if (!dev_priv)
3814 return;
3815
3816 gen8_irq_reset(dev);
3817 }
3818
3819 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3820 {
3821 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3822 	 * just to make the assert_spin_locked() check happy. */
3823 spin_lock_irq(&dev_priv->irq_lock);
3824 if (dev_priv->display_irqs_enabled)
3825 valleyview_display_irqs_uninstall(dev_priv);
3826 spin_unlock_irq(&dev_priv->irq_lock);
3827
3828 vlv_display_irq_reset(dev_priv);
3829
3830 dev_priv->irq_mask = ~0;
3831 }
3832
3833 static void valleyview_irq_uninstall(struct drm_device *dev)
3834 {
3835 struct drm_i915_private *dev_priv = dev->dev_private;
3836
3837 if (!dev_priv)
3838 return;
3839
3840 I915_WRITE(VLV_MASTER_IER, 0);
3841
3842 gen5_gt_irq_reset(dev);
3843
3844 I915_WRITE(HWSTAM, 0xffffffff);
3845
3846 vlv_display_irq_uninstall(dev_priv);
3847 }
3848
3849 static void cherryview_irq_uninstall(struct drm_device *dev)
3850 {
3851 struct drm_i915_private *dev_priv = dev->dev_private;
3852
3853 if (!dev_priv)
3854 return;
3855
3856 I915_WRITE(GEN8_MASTER_IRQ, 0);
3857 POSTING_READ(GEN8_MASTER_IRQ);
3858
3859 gen8_gt_irq_reset(dev_priv);
3860
3861 GEN5_IRQ_RESET(GEN8_PCU_);
3862
3863 vlv_display_irq_uninstall(dev_priv);
3864 }
3865
3866 static void ironlake_irq_uninstall(struct drm_device *dev)
3867 {
3868 struct drm_i915_private *dev_priv = dev->dev_private;
3869
3870 if (!dev_priv)
3871 return;
3872
3873 ironlake_irq_reset(dev);
3874 }
3875
3876 static void i8xx_irq_preinstall(struct drm_device * dev)
3877 {
3878 struct drm_i915_private *dev_priv = dev->dev_private;
3879 int pipe;
3880
3881 for_each_pipe(dev_priv, pipe)
3882 I915_WRITE(PIPESTAT(pipe), 0);
3883 I915_WRITE16(IMR, 0xffff);
3884 I915_WRITE16(IER, 0x0);
3885 POSTING_READ16(IER);
3886 }
3887
3888 static int i8xx_irq_postinstall(struct drm_device *dev)
3889 {
3890 struct drm_i915_private *dev_priv = dev->dev_private;
3891
3892 I915_WRITE16(EMR,
3893 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3894
3895 /* Unmask the interrupts that we always want on. */
3896 dev_priv->irq_mask =
3897 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3898 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3899 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3900 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3901 I915_WRITE16(IMR, dev_priv->irq_mask);
3902
3903 I915_WRITE16(IER,
3904 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3905 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3906 I915_USER_INTERRUPT);
3907 POSTING_READ16(IER);
3908
3909 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3910 	 * just to make the assert_spin_locked() check happy. */
3911 spin_lock_irq(&dev_priv->irq_lock);
3912 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3913 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3914 spin_unlock_irq(&dev_priv->irq_lock);
3915
3916 return 0;
3917 }
3918
3919 /*
3920 * Returns true when a page flip has completed.
3921 */
3922 static bool i8xx_handle_vblank(struct drm_device *dev,
3923 int plane, int pipe, u32 iir)
3924 {
3925 struct drm_i915_private *dev_priv = dev->dev_private;
3926 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3927
3928 if (!intel_pipe_handle_vblank(dev, pipe))
3929 return false;
3930
3931 if ((iir & flip_pending) == 0)
3932 goto check_page_flip;
3933
3934 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3935 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3936 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3937 * the flip is completed (no longer pending). Since this doesn't raise
3938 * an interrupt per se, we watch for the change at vblank.
3939 */
3940 if (I915_READ16(ISR) & flip_pending)
3941 goto check_page_flip;
3942
3943 intel_prepare_page_flip(dev, plane);
3944 intel_finish_page_flip(dev, pipe);
3945 return true;
3946
3947 check_page_flip:
3948 intel_check_page_flip(dev, pipe);
3949 return false;
3950 }
3951
3952 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
3953 {
3954 struct drm_device *dev = arg;
3955 struct drm_i915_private *dev_priv = dev->dev_private;
3956 u16 iir, new_iir;
3957 u32 pipe_stats[2];
3958 int pipe;
3959 u16 flip_mask =
3960 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3961 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3962
3963 if (!intel_irqs_enabled(dev_priv))
3964 return IRQ_NONE;
3965
3966 iir = I915_READ16(IIR);
3967 if (iir == 0)
3968 return IRQ_NONE;
3969
3970 while (iir & ~flip_mask) {
3971 /* Can't rely on pipestat interrupt bit in iir as it might
3972 * have been cleared after the pipestat interrupt was received.
3973 * It doesn't set the bit in iir again, but it still produces
3974 * interrupts (for non-MSI).
3975 */
3976 spin_lock(&dev_priv->irq_lock);
3977 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3978 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3979
3980 for_each_pipe(dev_priv, pipe) {
3981 int reg = PIPESTAT(pipe);
3982 pipe_stats[pipe] = I915_READ(reg);
3983
3984 /*
3985 * Clear the PIPE*STAT regs before the IIR
3986 */
3987 if (pipe_stats[pipe] & 0x8000ffff)
3988 I915_WRITE(reg, pipe_stats[pipe]);
3989 }
3990 spin_unlock(&dev_priv->irq_lock);
3991
3992 I915_WRITE16(IIR, iir & ~flip_mask);
3993 new_iir = I915_READ16(IIR); /* Flush posted writes */
3994
3995 if (iir & I915_USER_INTERRUPT)
3996 notify_ring(&dev_priv->ring[RCS]);
3997
3998 for_each_pipe(dev_priv, pipe) {
3999 int plane = pipe;
4000 if (HAS_FBC(dev))
4001 plane = !plane;
4002
4003 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4004 i8xx_handle_vblank(dev, plane, pipe, iir))
4005 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4006
4007 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4008 i9xx_pipe_crc_irq_handler(dev, pipe);
4009
4010 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4011 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4012 pipe);
4013 }
4014
4015 iir = new_iir;
4016 }
4017
4018 return IRQ_HANDLED;
4019 }
4020
4021 static void i8xx_irq_uninstall(struct drm_device * dev)
4022 {
4023 struct drm_i915_private *dev_priv = dev->dev_private;
4024 int pipe;
4025
4026 for_each_pipe(dev_priv, pipe) {
4027 /* Clear enable bits; then clear status bits */
4028 I915_WRITE(PIPESTAT(pipe), 0);
4029 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4030 }
4031 I915_WRITE16(IMR, 0xffff);
4032 I915_WRITE16(IER, 0x0);
4033 I915_WRITE16(IIR, I915_READ16(IIR));
4034 }
4035
4036 static void i915_irq_preinstall(struct drm_device * dev)
4037 {
4038 struct drm_i915_private *dev_priv = dev->dev_private;
4039 int pipe;
4040
4041 if (I915_HAS_HOTPLUG(dev)) {
4042 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4043 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4044 }
4045
4046 I915_WRITE16(HWSTAM, 0xeffe);
4047 for_each_pipe(dev_priv, pipe)
4048 I915_WRITE(PIPESTAT(pipe), 0);
4049 I915_WRITE(IMR, 0xffffffff);
4050 I915_WRITE(IER, 0x0);
4051 POSTING_READ(IER);
4052 }
4053
4054 static int i915_irq_postinstall(struct drm_device *dev)
4055 {
4056 struct drm_i915_private *dev_priv = dev->dev_private;
4057 u32 enable_mask;
4058
4059 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4060
4061 /* Unmask the interrupts that we always want on. */
4062 dev_priv->irq_mask =
4063 ~(I915_ASLE_INTERRUPT |
4064 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4065 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4066 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4067 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4068
4069 enable_mask =
4070 I915_ASLE_INTERRUPT |
4071 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4072 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4073 I915_USER_INTERRUPT;
4074
4075 if (I915_HAS_HOTPLUG(dev)) {
4076 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4077 POSTING_READ(PORT_HOTPLUG_EN);
4078
4079 /* Enable in IER... */
4080 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4081 /* and unmask in IMR */
4082 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4083 }
4084
4085 I915_WRITE(IMR, dev_priv->irq_mask);
4086 I915_WRITE(IER, enable_mask);
4087 POSTING_READ(IER);
4088
4089 i915_enable_asle_pipestat(dev);
4090
4091 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4092 	 * just to make the assert_spin_locked() check happy. */
4093 spin_lock_irq(&dev_priv->irq_lock);
4094 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4095 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4096 spin_unlock_irq(&dev_priv->irq_lock);
4097
4098 return 0;
4099 }
4100
4101 /*
4102 * Returns true when a page flip has completed.
4103 */
4104 static bool i915_handle_vblank(struct drm_device *dev,
4105 int plane, int pipe, u32 iir)
4106 {
4107 struct drm_i915_private *dev_priv = dev->dev_private;
4108 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4109
4110 if (!intel_pipe_handle_vblank(dev, pipe))
4111 return false;
4112
4113 if ((iir & flip_pending) == 0)
4114 goto check_page_flip;
4115
4116 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4117 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4118 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4119 * the flip is completed (no longer pending). Since this doesn't raise
4120 * an interrupt per se, we watch for the change at vblank.
4121 */
4122 if (I915_READ(ISR) & flip_pending)
4123 goto check_page_flip;
4124
4125 intel_prepare_page_flip(dev, plane);
4126 intel_finish_page_flip(dev, pipe);
4127 return true;
4128
4129 check_page_flip:
4130 intel_check_page_flip(dev, pipe);
4131 return false;
4132 }
4133
4134 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
4135 {
4136 struct drm_device *dev = arg;
4137 struct drm_i915_private *dev_priv = dev->dev_private;
4138 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4139 u32 flip_mask =
4140 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4141 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4142 int pipe, ret = IRQ_NONE;
4143
4144 if (!intel_irqs_enabled(dev_priv))
4145 return IRQ_NONE;
4146
4147 iir = I915_READ(IIR);
4148 do {
4149 bool irq_received = (iir & ~flip_mask) != 0;
4150 bool blc_event = false;
4151
4152 /* Can't rely on pipestat interrupt bit in iir as it might
4153 * have been cleared after the pipestat interrupt was received.
4154 * It doesn't set the bit in iir again, but it still produces
4155 * interrupts (for non-MSI).
4156 */
4157 spin_lock(&dev_priv->irq_lock);
4158 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4159 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4160
4161 for_each_pipe(dev_priv, pipe) {
4162 int reg = PIPESTAT(pipe);
4163 pipe_stats[pipe] = I915_READ(reg);
4164
4165 /* Clear the PIPE*STAT regs before the IIR */
4166 if (pipe_stats[pipe] & 0x8000ffff) {
4167 I915_WRITE(reg, pipe_stats[pipe]);
4168 irq_received = true;
4169 }
4170 }
4171 spin_unlock(&dev_priv->irq_lock);
4172
4173 if (!irq_received)
4174 break;
4175
4176 /* Consume port. Then clear IIR or we'll miss events */
4177 if (I915_HAS_HOTPLUG(dev) &&
4178 iir & I915_DISPLAY_PORT_INTERRUPT)
4179 i9xx_hpd_irq_handler(dev);
4180
4181 I915_WRITE(IIR, iir & ~flip_mask);
4182 new_iir = I915_READ(IIR); /* Flush posted writes */
4183
4184 if (iir & I915_USER_INTERRUPT)
4185 notify_ring(&dev_priv->ring[RCS]);
4186
4187 for_each_pipe(dev_priv, pipe) {
4188 int plane = pipe;
4189 if (HAS_FBC(dev))
4190 plane = !plane;
4191
4192 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4193 i915_handle_vblank(dev, plane, pipe, iir))
4194 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4195
4196 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4197 blc_event = true;
4198
4199 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4200 i9xx_pipe_crc_irq_handler(dev, pipe);
4201
4202 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4203 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4204 pipe);
4205 }
4206
4207 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4208 intel_opregion_asle_intr(dev);
4209
4210 /* With MSI, interrupts are only generated when iir
4211 * transitions from zero to nonzero. If another bit got
4212 * set while we were handling the existing iir bits, then
4213 * we would never get another interrupt.
4214 *
4215 * This is fine on non-MSI as well, as if we hit this path
4216 * we avoid exiting the interrupt handler only to generate
4217 * another one.
4218 *
4219 * Note that for MSI this could cause a stray interrupt report
4220 * if an interrupt landed in the time between writing IIR and
4221 * the posting read. This should be rare enough to never
4222 * trigger the 99% of 100,000 interrupts test for disabling
4223 * stray interrupts.
4224 */
4225 ret = IRQ_HANDLED;
4226 iir = new_iir;
4227 } while (iir & ~flip_mask);
4228
4229 return ret;
4230 }
4231
4232 static void i915_irq_uninstall(struct drm_device * dev)
4233 {
4234 struct drm_i915_private *dev_priv = dev->dev_private;
4235 int pipe;
4236
4237 if (I915_HAS_HOTPLUG(dev)) {
4238 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4239 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4240 }
4241
4242 I915_WRITE16(HWSTAM, 0xffff);
4243 for_each_pipe(dev_priv, pipe) {
4244 /* Clear enable bits; then clear status bits */
4245 I915_WRITE(PIPESTAT(pipe), 0);
4246 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4247 }
4248 I915_WRITE(IMR, 0xffffffff);
4249 I915_WRITE(IER, 0x0);
4250
4251 I915_WRITE(IIR, I915_READ(IIR));
4252 }
4253
4254 static void i965_irq_preinstall(struct drm_device * dev)
4255 {
4256 struct drm_i915_private *dev_priv = dev->dev_private;
4257 int pipe;
4258
4259 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4260 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4261
4262 I915_WRITE(HWSTAM, 0xeffe);
4263 for_each_pipe(dev_priv, pipe)
4264 I915_WRITE(PIPESTAT(pipe), 0);
4265 I915_WRITE(IMR, 0xffffffff);
4266 I915_WRITE(IER, 0x0);
4267 POSTING_READ(IER);
4268 }
4269
4270 static int i965_irq_postinstall(struct drm_device *dev)
4271 {
4272 struct drm_i915_private *dev_priv = dev->dev_private;
4273 u32 enable_mask;
4274 u32 error_mask;
4275
4276 /* Unmask the interrupts that we always want on. */
4277 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4278 I915_DISPLAY_PORT_INTERRUPT |
4279 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4280 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4281 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4282 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4283 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4284
4285 enable_mask = ~dev_priv->irq_mask;
4286 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4287 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4288 enable_mask |= I915_USER_INTERRUPT;
4289
4290 if (IS_G4X(dev))
4291 enable_mask |= I915_BSD_USER_INTERRUPT;
4292
4293 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4294 	 * just to make the assert_spin_locked() check happy. */
4295 spin_lock_irq(&dev_priv->irq_lock);
4296 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4297 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4298 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4299 spin_unlock_irq(&dev_priv->irq_lock);
4300
4301 /*
4302 	 * Enable some error detection; note the instruction error mask
4303 * bit is reserved, so we leave it masked.
4304 */
4305 if (IS_G4X(dev)) {
4306 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4307 GM45_ERROR_MEM_PRIV |
4308 GM45_ERROR_CP_PRIV |
4309 I915_ERROR_MEMORY_REFRESH);
4310 } else {
4311 error_mask = ~(I915_ERROR_PAGE_TABLE |
4312 I915_ERROR_MEMORY_REFRESH);
4313 }
4314 I915_WRITE(EMR, error_mask);
4315
4316 I915_WRITE(IMR, dev_priv->irq_mask);
4317 I915_WRITE(IER, enable_mask);
4318 POSTING_READ(IER);
4319
4320 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4321 POSTING_READ(PORT_HOTPLUG_EN);
4322
4323 i915_enable_asle_pipestat(dev);
4324
4325 return 0;
4326 }
4327
4328 static void i915_hpd_irq_setup(struct drm_device *dev)
4329 {
4330 struct drm_i915_private *dev_priv = dev->dev_private;
4331 u32 hotplug_en;
4332
4333 assert_spin_locked(&dev_priv->irq_lock);
4334
4335 /* Note HDMI and DP share hotplug bits */
4336 /* enable bits are the same for all generations */
4337 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4338 	/* Programming the CRT detection parameters tends
4339 	 * to generate a spurious hotplug event about three
4340 	 * seconds later. So just do it once.
4341 	 */
4342 if (IS_G4X(dev))
4343 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4344 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4345
4346 /* Ignore TV since it's buggy */
4347 i915_hotplug_interrupt_update_locked(dev_priv,
4348 HOTPLUG_INT_EN_MASK |
4349 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4350 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4351 hotplug_en);
4352 }
4353
4354 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
4355 {
4356 struct drm_device *dev = arg;
4357 struct drm_i915_private *dev_priv = dev->dev_private;
4358 u32 iir, new_iir;
4359 u32 pipe_stats[I915_MAX_PIPES];
4360 int ret = IRQ_NONE, pipe;
4361 u32 flip_mask =
4362 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4363 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4364
4365 if (!intel_irqs_enabled(dev_priv))
4366 return IRQ_NONE;
4367
4368 iir = I915_READ(IIR);
4369
4370 for (;;) {
4371 bool irq_received = (iir & ~flip_mask) != 0;
4372 bool blc_event = false;
4373
4374 /* Can't rely on pipestat interrupt bit in iir as it might
4375 * have been cleared after the pipestat interrupt was received.
4376 * It doesn't set the bit in iir again, but it still produces
4377 * interrupts (for non-MSI).
4378 */
4379 spin_lock(&dev_priv->irq_lock);
4380 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4381 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4382
4383 for_each_pipe(dev_priv, pipe) {
4384 int reg = PIPESTAT(pipe);
4385 pipe_stats[pipe] = I915_READ(reg);
4386
4387 /*
4388 * Clear the PIPE*STAT regs before the IIR
4389 */
4390 if (pipe_stats[pipe] & 0x8000ffff) {
4391 I915_WRITE(reg, pipe_stats[pipe]);
4392 irq_received = true;
4393 }
4394 }
4395 spin_unlock(&dev_priv->irq_lock);
4396
4397 if (!irq_received)
4398 break;
4399
4400 ret = IRQ_HANDLED;
4401
4402 /* Consume port. Then clear IIR or we'll miss events */
4403 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4404 i9xx_hpd_irq_handler(dev);
4405
4406 I915_WRITE(IIR, iir & ~flip_mask);
4407 new_iir = I915_READ(IIR); /* Flush posted writes */
4408
4409 if (iir & I915_USER_INTERRUPT)
4410 notify_ring(&dev_priv->ring[RCS]);
4411 if (iir & I915_BSD_USER_INTERRUPT)
4412 notify_ring(&dev_priv->ring[VCS]);
4413
4414 for_each_pipe(dev_priv, pipe) {
4415 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4416 i915_handle_vblank(dev, pipe, pipe, iir))
4417 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4418
4419 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4420 blc_event = true;
4421
4422 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4423 i9xx_pipe_crc_irq_handler(dev, pipe);
4424
4425 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4426 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4427 }
4428
4429 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4430 intel_opregion_asle_intr(dev);
4431
4432 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4433 gmbus_irq_handler(dev);
4434
4435 /* With MSI, interrupts are only generated when iir
4436 * transitions from zero to nonzero. If another bit got
4437 * set while we were handling the existing iir bits, then
4438 * we would never get another interrupt.
4439 *
4440 * This is fine on non-MSI as well, as if we hit this path
4441 * we avoid exiting the interrupt handler only to generate
4442 * another one.
4443 *
4444 * Note that for MSI this could cause a stray interrupt report
4445 * if an interrupt landed in the time between writing IIR and
4446 * the posting read. This should be rare enough to never
4447 * trigger the 99% of 100,000 interrupts test for disabling
4448 * stray interrupts.
4449 */
4450 iir = new_iir;
4451 }
4452
4453 return ret;
4454 }
4455
4456 static void i965_irq_uninstall(struct drm_device * dev)
4457 {
4458 struct drm_i915_private *dev_priv = dev->dev_private;
4459 int pipe;
4460
4461 if (!dev_priv)
4462 return;
4463
4464 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4465 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4466
4467 I915_WRITE(HWSTAM, 0xffffffff);
4468 for_each_pipe(dev_priv, pipe)
4469 I915_WRITE(PIPESTAT(pipe), 0);
4470 I915_WRITE(IMR, 0xffffffff);
4471 I915_WRITE(IER, 0x0);
4472
4473 for_each_pipe(dev_priv, pipe)
4474 I915_WRITE(PIPESTAT(pipe),
4475 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4476 I915_WRITE(IIR, I915_READ(IIR));
4477 }
4478
4479 /**
4480 * intel_irq_init - initializes irq support
4481 * @dev_priv: i915 device instance
4482 *
4483 * This function initializes all the irq support including work items, timers
4484  * and all the vtables. It does not set up the interrupt itself, though.
4485 */
4486 void intel_irq_init(struct drm_i915_private *dev_priv)
4487 {
4488 struct drm_device *dev = dev_priv->dev;
4489
4490 intel_hpd_init_work(dev_priv);
4491
4492 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4493 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4494
4495 /* Let's track the enabled rps events */
4496 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4497 /* WaGsvRC0ResidencyMethod:vlv */
4498 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4499 else
4500 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4501
4502 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4503 i915_hangcheck_elapsed);
4504
4505 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4506
4507 if (IS_GEN2(dev_priv)) {
4508 dev->max_vblank_count = 0;
4509 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4510 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4511 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4512 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4513 } else {
4514 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4515 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4516 }
4517
4518 /*
4519 * Opt out of the vblank disable timer on everything except gen2.
4520 * Gen2 doesn't have a hardware frame counter and so depends on
4521  * vblank interrupts to produce sane vblank sequence numbers.
4522 */
4523 if (!IS_GEN2(dev_priv))
4524 dev->vblank_disable_immediate = true;
4525
4526 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4527 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4528
4529 if (IS_CHERRYVIEW(dev_priv)) {
4530 dev->driver->irq_handler = cherryview_irq_handler;
4531 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4532 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4533 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4534 dev->driver->enable_vblank = valleyview_enable_vblank;
4535 dev->driver->disable_vblank = valleyview_disable_vblank;
4536 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4537 } else if (IS_VALLEYVIEW(dev_priv)) {
4538 dev->driver->irq_handler = valleyview_irq_handler;
4539 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4540 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4541 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4542 dev->driver->enable_vblank = valleyview_enable_vblank;
4543 dev->driver->disable_vblank = valleyview_disable_vblank;
4544 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4545 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4546 dev->driver->irq_handler = gen8_irq_handler;
4547 dev->driver->irq_preinstall = gen8_irq_reset;
4548 dev->driver->irq_postinstall = gen8_irq_postinstall;
4549 dev->driver->irq_uninstall = gen8_irq_uninstall;
4550 dev->driver->enable_vblank = gen8_enable_vblank;
4551 dev->driver->disable_vblank = gen8_disable_vblank;
4552 if (IS_BROXTON(dev))
4553 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4554 else if (HAS_PCH_SPT(dev))
4555 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4556 else
4557 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4558 } else if (HAS_PCH_SPLIT(dev)) {
4559 dev->driver->irq_handler = ironlake_irq_handler;
4560 dev->driver->irq_preinstall = ironlake_irq_reset;
4561 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4562 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4563 dev->driver->enable_vblank = ironlake_enable_vblank;
4564 dev->driver->disable_vblank = ironlake_disable_vblank;
4565 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4566 } else {
4567 if (INTEL_INFO(dev_priv)->gen == 2) {
4568 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4569 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4570 dev->driver->irq_handler = i8xx_irq_handler;
4571 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4572 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4573 dev->driver->irq_preinstall = i915_irq_preinstall;
4574 dev->driver->irq_postinstall = i915_irq_postinstall;
4575 dev->driver->irq_uninstall = i915_irq_uninstall;
4576 dev->driver->irq_handler = i915_irq_handler;
4577 } else {
4578 dev->driver->irq_preinstall = i965_irq_preinstall;
4579 dev->driver->irq_postinstall = i965_irq_postinstall;
4580 dev->driver->irq_uninstall = i965_irq_uninstall;
4581 dev->driver->irq_handler = i965_irq_handler;
4582 }
4583 if (I915_HAS_HOTPLUG(dev_priv))
4584 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4585 dev->driver->enable_vblank = i915_enable_vblank;
4586 dev->driver->disable_vblank = i915_disable_vblank;
4587 }
4588 }
4589
4590 /**
4591 * intel_irq_install - enables the hardware interrupt
4592 * @dev_priv: i915 device instance
4593 *
4594  * This function enables the hardware interrupt handling, but leaves hotplug
4595  * handling disabled. It is called after intel_irq_init().
4596 *
4597 * In the driver load and resume code we need working interrupts in a few places
4598 * but don't want to deal with the hassle of concurrent probe and hotplug
4599 * workers. Hence the split into this two-stage approach.
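 *
 * A typical driver load therefore calls intel_irq_init() first, then
 * intel_irq_install(), and only turns on hotplug handling afterwards.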
4600 */
4601 int intel_irq_install(struct drm_i915_private *dev_priv)
4602 {
4603 /*
4604 * We enable some interrupt sources in our postinstall hooks, so mark
4605 * interrupts as enabled _before_ actually enabling them to avoid
4606 * special cases in our ordering checks.
4607 */
4608 dev_priv->pm.irqs_enabled = true;
4609
4610 #ifdef __NetBSD__
4611 return drm_irq_install(dev_priv->dev);
4612 #else
4613 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4614 #endif
4615 }
4616
4617 /**
4618  * intel_irq_uninstall - finalizes all irq handling
4619 * @dev_priv: i915 device instance
4620 *
4621 * This stops interrupt and hotplug handling and unregisters and frees all
4622 * resources acquired in the init functions.
4623 */
4624 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4625 {
4626 drm_irq_uninstall(dev_priv->dev);
4627 intel_hpd_cancel_work(dev_priv);
4628 dev_priv->pm.irqs_enabled = false;
4629 }
4630
4631 /**
4632 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4633 * @dev_priv: i915 device instance
4634 *
4635 * This function is used to disable interrupts at runtime, both in the runtime
4636 * pm and the system suspend/resume code.
4637 */
4638 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4639 {
4640 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4641 dev_priv->pm.irqs_enabled = false;
4642 synchronize_irq(dev_priv->dev->irq);
4643 }
4644
4645 /**
4646 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4647 * @dev_priv: i915 device instance
4648 *
4649 * This function is used to enable interrupts at runtime, both in the runtime
4650 * pm and the system suspend/resume code.
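 *
 * It re-runs the platform irq_preinstall and irq_postinstall hooks to
 * restore the state torn down by intel_runtime_pm_disable_interrupts().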
4651 */
4652 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4653 {
4654 dev_priv->pm.irqs_enabled = true;
4655 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4656 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4657 }
4658