1 /* $NetBSD: i915_irq.c,v 1.16 2018/08/27 14:52:56 riastradh Exp $ */
2
3 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 */
5 /*
6 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sub license, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial portions
19 * of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
25 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.16 2018/08/27 14:52:56 riastradh Exp $");
33
34 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36 #ifdef __NetBSD__
37 #include <sys/cdefs.h>
38 #endif
39
40 #include <linux/hardirq.h>
41 #include <linux/printk.h>
42 #include <linux/sysrq.h>
43 #include <linux/slab.h>
44 #ifdef CONFIG_DEBUG_FS
45 #include <linux/circ_buf.h>
46 #endif
47 #include <drm/drmP.h>
48 #include <drm/i915_drm.h>
49 #include "i915_drv.h"
50 #include "i915_trace.h"
51 #include "intel_drv.h"
52
53 /**
54 * DOC: interrupt handling
55 *
56 * These functions provide the basic support for enabling and disabling
57 * the interrupt handling. There's a lot more functionality in i915_irq.c
58 * and related files, but that will be described in separate chapters.
59 */
60
61 static const u32 hpd_ilk[HPD_NUM_PINS] = {
62 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
63 };
64
65 static const u32 hpd_ivb[HPD_NUM_PINS] = {
66 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
67 };
68
69 static const u32 hpd_bdw[HPD_NUM_PINS] = {
70 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
71 };
72
73 static const u32 hpd_ibx[HPD_NUM_PINS] = {
74 [HPD_CRT] = SDE_CRT_HOTPLUG,
75 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
76 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
77 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
78 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
79 };
80
81 static const u32 hpd_cpt[HPD_NUM_PINS] = {
82 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
83 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
84 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
85 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
86 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
87 };
88
89 static const u32 hpd_spt[HPD_NUM_PINS] = {
90 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
91 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
92 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
93 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
94 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
95 };
96
97 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
98 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
99 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
100 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
101 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
102 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
103 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
104 };
105
106 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
107 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
108 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
109 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
110 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
111 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
112 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
113 };
114
115 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
116 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
117 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
118 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
119 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
120 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
121 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
122 };
123
124 /* BXT hpd list */
125 static const u32 hpd_bxt[HPD_NUM_PINS] = {
126 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
127 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
128 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
129 };
130
131 /* IIR can theoretically queue up two events. Be paranoid. */
132 #define GEN8_IRQ_RESET_NDX(type, which) do { \
133 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
134 POSTING_READ(GEN8_##type##_IMR(which)); \
135 I915_WRITE(GEN8_##type##_IER(which), 0); \
136 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
137 POSTING_READ(GEN8_##type##_IIR(which)); \
138 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
139 POSTING_READ(GEN8_##type##_IIR(which)); \
140 } while (0)
141
142 #define GEN5_IRQ_RESET(type) do { \
143 I915_WRITE(type##IMR, 0xffffffff); \
144 POSTING_READ(type##IMR); \
145 I915_WRITE(type##IER, 0); \
146 I915_WRITE(type##IIR, 0xffffffff); \
147 POSTING_READ(type##IIR); \
148 I915_WRITE(type##IIR, 0xffffffff); \
149 POSTING_READ(type##IIR); \
150 } while (0)
151
152 /*
153 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
154 */
155 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
156 {
157 u32 val = I915_READ(reg);
158
159 if (val == 0)
160 return;
161
162 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
163 reg, val);
164 I915_WRITE(reg, 0xffffffff);
165 POSTING_READ(reg);
166 I915_WRITE(reg, 0xffffffff);
167 POSTING_READ(reg);
168 }
169
170 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
171 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
172 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
173 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
174 POSTING_READ(GEN8_##type##_IMR(which)); \
175 } while (0)
176
177 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
178 gen5_assert_iir_is_zero(dev_priv, type##IIR); \
179 I915_WRITE(type##IER, (ier_val)); \
180 I915_WRITE(type##IMR, (imr_val)); \
181 POSTING_READ(type##IMR); \
182 } while (0)
183
184 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
185
186 /* For display hotplug interrupt */
187 static inline void
188 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
189 uint32_t mask,
190 uint32_t bits)
191 {
192 uint32_t val;
193
194 assert_spin_locked(&dev_priv->irq_lock);
195 WARN_ON(bits & ~mask);
196
197 val = I915_READ(PORT_HOTPLUG_EN);
198 val &= ~mask;
199 val |= bits;
200 I915_WRITE(PORT_HOTPLUG_EN, val);
201 }
202
203 /**
204 * i915_hotplug_interrupt_update - update hotplug interrupt enable
205 * @dev_priv: driver private
206 * @mask: bits to update
207 * @bits: bits to enable
208 * NOTE: the HPD enable bits are modified both inside and outside
209 * of an interrupt context. To keep concurrent read-modify-write
210 * cycles from interfering, these bits are protected by a spinlock.
211 * Since this function is usually not called from a context where the
212 * lock is already held, it acquires the lock itself. A non-locking
213 * version is also available.
214 */
215 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
216 uint32_t mask,
217 uint32_t bits)
218 {
219 spin_lock_irq(&dev_priv->irq_lock);
220 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
221 spin_unlock_irq(&dev_priv->irq_lock);
222 }
223
224 /**
225 * ilk_update_display_irq - update DEIMR
226 * @dev_priv: driver private
227 * @interrupt_mask: mask of interrupt bits to update
228 * @enabled_irq_mask: mask of interrupt bits to enable
229 */
230 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
231 uint32_t interrupt_mask,
232 uint32_t enabled_irq_mask)
233 {
234 uint32_t new_val;
235
236 assert_spin_locked(&dev_priv->irq_lock);
237
238 WARN_ON(enabled_irq_mask & ~interrupt_mask);
239
240 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
241 return;
242
243 new_val = dev_priv->irq_mask;
244 new_val &= ~interrupt_mask;
245 new_val |= (~enabled_irq_mask & interrupt_mask);
246
247 if (new_val != dev_priv->irq_mask) {
248 dev_priv->irq_mask = new_val;
249 I915_WRITE(DEIMR, dev_priv->irq_mask);
250 POSTING_READ(DEIMR);
251 }
252 }
253
254 void
255 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
256 {
257 ilk_update_display_irq(dev_priv, mask, mask);
258 }
259
260 void
261 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
262 {
263 ilk_update_display_irq(dev_priv, mask, 0);
264 }
265
266 /**
267 * ilk_update_gt_irq - update GTIMR
268 * @dev_priv: driver private
269 * @interrupt_mask: mask of interrupt bits to update
270 * @enabled_irq_mask: mask of interrupt bits to enable
271 */
272 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
273 uint32_t interrupt_mask,
274 uint32_t enabled_irq_mask)
275 {
276 assert_spin_locked(&dev_priv->irq_lock);
277
278 WARN_ON(enabled_irq_mask & ~interrupt_mask);
279
280 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
281 return;
282
283 dev_priv->gt_irq_mask &= ~interrupt_mask;
284 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
285 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
286 POSTING_READ(GTIMR);
287 }
288
289 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
290 {
291 ilk_update_gt_irq(dev_priv, mask, mask);
292 }
293
294 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
295 {
296 ilk_update_gt_irq(dev_priv, mask, 0);
297 }
298
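/*
 * On gen8+ the PM/RPS interrupt bits live in GT interrupt bank 2 rather
 * than the dedicated GEN6_PMIIR/PMIMR/PMIER registers; these helpers
 * return the appropriate register for the current hardware.
 */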
299 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
300 {
301 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
302 }
303
304 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
305 {
306 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
307 }
308
309 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
310 {
311 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
312 }
313
314 /**
315 * snb_update_pm_irq - update GEN6_PMIMR
316 * @dev_priv: driver private
317 * @interrupt_mask: mask of interrupt bits to update
318 * @enabled_irq_mask: mask of interrupt bits to enable
319 */
320 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
321 uint32_t interrupt_mask,
322 uint32_t enabled_irq_mask)
323 {
324 uint32_t new_val;
325
326 WARN_ON(enabled_irq_mask & ~interrupt_mask);
327
328 assert_spin_locked(&dev_priv->irq_lock);
329
330 new_val = dev_priv->pm_irq_mask;
331 new_val &= ~interrupt_mask;
332 new_val |= (~enabled_irq_mask & interrupt_mask);
333
334 if (new_val != dev_priv->pm_irq_mask) {
335 dev_priv->pm_irq_mask = new_val;
336 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
337 POSTING_READ(gen6_pm_imr(dev_priv));
338 }
339 }
340
341 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
342 {
343 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
344 return;
345
346 snb_update_pm_irq(dev_priv, mask, mask);
347 }
348
349 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
350 uint32_t mask)
351 {
352 snb_update_pm_irq(dev_priv, mask, 0);
353 }
354
355 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
356 {
357 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
358 return;
359
360 __gen6_disable_pm_irq(dev_priv, mask);
361 }
362
363 void gen6_reset_rps_interrupts(struct drm_device *dev)
364 {
365 struct drm_i915_private *dev_priv = dev->dev_private;
366 uint32_t reg = gen6_pm_iir(dev_priv);
367
368 spin_lock_irq(&dev_priv->irq_lock);
369 I915_WRITE(reg, dev_priv->pm_rps_events);
370 I915_WRITE(reg, dev_priv->pm_rps_events);
371 POSTING_READ(reg);
372 dev_priv->rps.pm_iir = 0;
373 spin_unlock_irq(&dev_priv->irq_lock);
374 }
375
376 void gen6_enable_rps_interrupts(struct drm_device *dev)
377 {
378 struct drm_i915_private *dev_priv = dev->dev_private;
379
380 spin_lock_irq(&dev_priv->irq_lock);
381
382 WARN_ON(dev_priv->rps.pm_iir);
383 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
384 dev_priv->rps.interrupts_enabled = true;
385 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
386 dev_priv->pm_rps_events);
387 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
388
389 spin_unlock_irq(&dev_priv->irq_lock);
390 }
391
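/*
 * Sanitize a value destined for GEN6_PMINTRMSK: some RPS interrupt bits
 * must never be masked out (see the hard-hang note below), and on gen8+
 * the redirect-to-non-display bit must not be set.
 */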
392 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
393 {
394 /*
395 * SNB and IVB are known to hard hang on a looping batchbuffer if
396 * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may do the same.
397 *
398 * TODO: verify if this can be reproduced on VLV,CHV.
399 */
400 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
401 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
402
403 if (INTEL_INFO(dev_priv)->gen >= 8)
404 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
405
406 return mask;
407 }
408
409 void gen6_disable_rps_interrupts(struct drm_device *dev)
410 {
411 struct drm_i915_private *dev_priv = dev->dev_private;
412
413 spin_lock_irq(&dev_priv->irq_lock);
414 dev_priv->rps.interrupts_enabled = false;
415 spin_unlock_irq(&dev_priv->irq_lock);
416
417 cancel_work_sync(&dev_priv->rps.work);
418
419 spin_lock_irq(&dev_priv->irq_lock);
420
421 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
422
423 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
424 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
425 ~dev_priv->pm_rps_events);
426
427 spin_unlock_irq(&dev_priv->irq_lock);
428
429 synchronize_irq(dev->irq);
430 }
431
432 /**
433 * bdw_update_port_irq - update DE port interrupt
434 * @dev_priv: driver private
435 * @interrupt_mask: mask of interrupt bits to update
436 * @enabled_irq_mask: mask of interrupt bits to enable
437 */
438 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
439 uint32_t interrupt_mask,
440 uint32_t enabled_irq_mask)
441 {
442 uint32_t new_val;
443 uint32_t old_val;
444
445 assert_spin_locked(&dev_priv->irq_lock);
446
447 WARN_ON(enabled_irq_mask & ~interrupt_mask);
448
449 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
450 return;
451
452 old_val = I915_READ(GEN8_DE_PORT_IMR);
453
454 new_val = old_val;
455 new_val &= ~interrupt_mask;
456 new_val |= (~enabled_irq_mask & interrupt_mask);
457
458 if (new_val != old_val) {
459 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
460 POSTING_READ(GEN8_DE_PORT_IMR);
461 }
462 }
463
464 /**
465 * ibx_display_interrupt_update - update SDEIMR
466 * @dev_priv: driver private
467 * @interrupt_mask: mask of interrupt bits to update
468 * @enabled_irq_mask: mask of interrupt bits to enable
469 */
470 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
471 uint32_t interrupt_mask,
472 uint32_t enabled_irq_mask)
473 {
474 uint32_t sdeimr = I915_READ(SDEIMR);
475 sdeimr &= ~interrupt_mask;
476 sdeimr |= (~enabled_irq_mask & interrupt_mask);
477
478 WARN_ON(enabled_irq_mask & ~interrupt_mask);
479
480 assert_spin_locked(&dev_priv->irq_lock);
481
482 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
483 return;
484
485 I915_WRITE(SDEIMR, sdeimr);
486 POSTING_READ(SDEIMR);
487 }
488
489 static void
490 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
491 u32 enable_mask, u32 status_mask)
492 {
493 u32 reg = PIPESTAT(pipe);
494 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
495
496 assert_spin_locked(&dev_priv->irq_lock);
497 WARN_ON(!intel_irqs_enabled(dev_priv));
498
499 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
500 status_mask & ~PIPESTAT_INT_STATUS_MASK,
501 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
502 pipe_name(pipe), enable_mask, status_mask))
503 return;
504
505 if ((pipestat & enable_mask) == enable_mask)
506 return;
507
508 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
509
510 /* Enable the interrupt, clear any pending status */
511 pipestat |= enable_mask | status_mask;
512 I915_WRITE(reg, pipestat);
513 POSTING_READ(reg);
514 }
515
516 static void
517 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
518 u32 enable_mask, u32 status_mask)
519 {
520 u32 reg = PIPESTAT(pipe);
521 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
522
523 assert_spin_locked(&dev_priv->irq_lock);
524 WARN_ON(!intel_irqs_enabled(dev_priv));
525
526 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
527 status_mask & ~PIPESTAT_INT_STATUS_MASK,
528 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
529 pipe_name(pipe), enable_mask, status_mask))
530 return;
531
532 if ((pipestat & enable_mask) == 0)
533 return;
534
535 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
536
537 pipestat &= ~enable_mask;
538 I915_WRITE(reg, pipestat);
539 POSTING_READ(reg);
540 }
541
542 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
543 {
544 u32 enable_mask = status_mask << 16;
545
546 /*
547 * On pipe A we don't support the PSR interrupt yet,
548 * on pipe B and C the same bit MBZ.
549 */
550 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
551 return 0;
552 /*
553 * On pipe B and C we don't support the PSR interrupt yet, on pipe
554 * A the same bit is for perf counters which we don't use either.
555 */
556 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
557 return 0;
558
559 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
560 SPRITE0_FLIP_DONE_INT_EN_VLV |
561 SPRITE1_FLIP_DONE_INT_EN_VLV);
562 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
563 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
564 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
565 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
566
567 return enable_mask;
568 }
569
570 void
571 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
572 u32 status_mask)
573 {
574 u32 enable_mask;
575
576 if (IS_VALLEYVIEW(dev_priv->dev))
577 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
578 status_mask);
579 else
580 enable_mask = status_mask << 16;
581 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
582 }
583
584 void
585 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
586 u32 status_mask)
587 {
588 u32 enable_mask;
589
590 if (IS_VALLEYVIEW(dev_priv->dev))
591 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
592 status_mask);
593 else
594 enable_mask = status_mask << 16;
595 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
596 }
597
598 /**
599 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
600 * @dev: drm device
601 */
602 static void i915_enable_asle_pipestat(struct drm_device *dev)
603 {
604 struct drm_i915_private *dev_priv = dev->dev_private;
605
606 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
607 return;
608
609 spin_lock_irq(&dev_priv->irq_lock);
610
611 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
612 if (INTEL_INFO(dev)->gen >= 4)
613 i915_enable_pipestat(dev_priv, PIPE_A,
614 PIPE_LEGACY_BLC_EVENT_STATUS);
615
616 spin_unlock_irq(&dev_priv->irq_lock);
617 }
618
619 /*
620 * This timing diagram depicts the video signal in and
621 * around the vertical blanking period.
622 *
623 * Assumptions about the fictitious mode used in this example:
624 * vblank_start >= 3
625 * vsync_start = vblank_start + 1
626 * vsync_end = vblank_start + 2
627 * vtotal = vblank_start + 3
628 *
629 * start of vblank:
630 * latch double buffered registers
631 * increment frame counter (ctg+)
632 * generate start of vblank interrupt (gen4+)
633 * |
634 * | frame start:
635 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
636 * | may be shifted forward 1-3 extra lines via PIPECONF
637 * | |
638 * | | start of vsync:
639 * | | generate vsync interrupt
640 * | | |
641 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
642 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
643 * ----va---> <-----------------vb--------------------> <--------va-------------
644 * | | <----vs-----> |
645 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
646 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
647 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
648 * | | |
649 * last visible pixel first visible pixel
650 * | increment frame counter (gen3/4)
651 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
652 *
653 * x = horizontal active
654 * _ = horizontal blanking
655 * hs = horizontal sync
656 * va = vertical active
657 * vb = vertical blanking
658 * vs = vertical sync
659 * vbs = vblank_start (number)
660 *
661 * Summary:
662 * - most events happen at the start of horizontal sync
663 * - frame start happens at the start of horizontal blank, 1-4 lines
664 * (depending on PIPECONF settings) after the start of vblank
665 * - gen3/4 pixel and frame counter are synchronized with the start
666 * of horizontal active on the first line of vertical active
667 */
668
669 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
670 {
671 /* Gen2 doesn't have a hardware frame counter */
672 return 0;
673 }
674
675 /* Called from drm generic code, passed a 'crtc', which
676 * we use as a pipe index
677 */
678 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
679 {
680 struct drm_i915_private *dev_priv = dev->dev_private;
681 unsigned long high_frame;
682 unsigned long low_frame;
683 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
684 struct intel_crtc *intel_crtc =
685 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
686 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
687
688 htotal = mode->crtc_htotal;
689 hsync_start = mode->crtc_hsync_start;
690 vbl_start = mode->crtc_vblank_start;
691 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
692 vbl_start = DIV_ROUND_UP(vbl_start, 2);
693
694 /* Convert to pixel count */
695 vbl_start *= htotal;
696
697 /* Start of vblank event occurs at start of hsync */
698 vbl_start -= htotal - hsync_start;
699
700 high_frame = PIPEFRAME(pipe);
701 low_frame = PIPEFRAMEPIXEL(pipe);
702
703 /*
704 * High & low register fields aren't synchronized, so make sure
705 * we get a low value that's stable across two reads of the high
706 * register.
707 */
708 do {
709 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
710 low = I915_READ(low_frame);
711 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
712 } while (high1 != high2);
713
714 high1 >>= PIPE_FRAME_HIGH_SHIFT;
715 pixel = low & PIPE_PIXEL_MASK;
716 low >>= PIPE_FRAME_LOW_SHIFT;
717
718 /*
719 * The frame counter increments at beginning of active.
720 * Cook up a vblank counter by also checking the pixel
721 * counter against vblank start.
722 */
723 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
724 }
725
726 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
727 {
728 struct drm_i915_private *dev_priv = dev->dev_private;
729
730 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
731 }
732
733 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
734 #ifdef __NetBSD__
735 #define __raw_i915_read32(dev_priv, reg) bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg))
736 #else
737 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
738 #endif
739
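/*
 * Read the current scanline of @crtc from PIPEDSL and convert it to the
 * driver's canonical position (0..vtotal-1) via scanline_offset; see the
 * HSW workaround note inside for why the register may be re-read.
 */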
740 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
741 {
742 struct drm_device *dev = crtc->base.dev;
743 struct drm_i915_private *dev_priv = dev->dev_private;
744 const struct drm_display_mode *mode = &crtc->base.hwmode;
745 enum i915_pipe pipe = crtc->pipe;
746 int position, vtotal;
747
748 vtotal = mode->crtc_vtotal;
749 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
750 vtotal /= 2;
751
752 if (IS_GEN2(dev))
753 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
754 else
755 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
756
757 /*
758 * On HSW, the DSL reg (0x70000) appears to return 0 if we
759 * read it just before the start of vblank. So try it again
760 * so we don't accidentally end up spanning a vblank frame
761 * increment, causing the pipe_update_end() code to squawk at us.
762 *
763 * The nature of this problem means we can't simply check the ISR
764 * bit and return the vblank start value; nor can we use the scanline
765 * debug register in the transcoder as it appears to have the same
766 * problem. We may need to extend this to include other platforms,
767 * but so far testing only shows the problem on HSW.
768 */
769 if (HAS_DDI(dev) && !position) {
770 int i, temp;
771
772 for (i = 0; i < 100; i++) {
773 udelay(1);
774 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
775 DSL_LINEMASK_GEN3;
776 if (temp != position) {
777 position = temp;
778 break;
779 }
780 }
781 }
782
783 /*
784 * See update_scanline_offset() for the details on the
785 * scanline_offset adjustment.
786 */
787 return (position + crtc->scanline_offset) % vtotal;
788 }
789
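/*
 * Sample the scanout position for @pipe: *vpos/*hpos are returned relative
 * to the end of vblank (negative while inside vblank), *stime/*etime, when
 * non-NULL, bracket the hardware query, and the return value is a mask of
 * DRM_SCANOUTPOS_* flags.
 */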
790 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
791 unsigned int flags, int *vpos, int *hpos,
792 ktime_t *stime, ktime_t *etime,
793 const struct drm_display_mode *mode)
794 {
795 struct drm_i915_private *dev_priv = dev->dev_private;
796 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
797 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
798 int position;
799 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
800 bool in_vbl = true;
801 int ret = 0;
802 unsigned long irqflags;
803
804 if (WARN_ON(!mode->crtc_clock)) {
805 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
806 "pipe %c\n", pipe_name(pipe));
807 return 0;
808 }
809
810 htotal = mode->crtc_htotal;
811 hsync_start = mode->crtc_hsync_start;
812 vtotal = mode->crtc_vtotal;
813 vbl_start = mode->crtc_vblank_start;
814 vbl_end = mode->crtc_vblank_end;
815
816 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
817 vbl_start = DIV_ROUND_UP(vbl_start, 2);
818 vbl_end /= 2;
819 vtotal /= 2;
820 }
821
822 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
823
824 /*
825 * Lock uncore.lock, as we will do multiple timing critical raw
826 * register reads, potentially with preemption disabled, so the
827 * following code must not block on uncore.lock.
828 */
829 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
830
831 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
832
833 /* Get optional system timestamp before query. */
834 if (stime)
835 *stime = ktime_get();
836
837 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
838 /* No obvious pixelcount register. Only query vertical
839 * scanout position from Display scan line register.
840 */
841 position = __intel_get_crtc_scanline(intel_crtc);
842 } else {
843 /* Have access to pixelcount since start of frame.
844 * We can split this into vertical and horizontal
845 * scanout position.
846 */
847 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
848
849 /* convert to pixel counts */
850 vbl_start *= htotal;
851 vbl_end *= htotal;
852 vtotal *= htotal;
853
854 /*
855 * In interlaced modes, the pixel counter counts all pixels,
856 * so one field will have htotal more pixels. To keep the
857 * reported position from jumping backwards when the pixel
858 * counter is beyond the length of the shorter field, just
859 * clamp the position to the length of the shorter field. This
860 * matches how the scanline counter based position works since
861 * the scanline counter doesn't count the two half lines.
862 */
863 if (position >= vtotal)
864 position = vtotal - 1;
865
866 /*
867 * Start of vblank interrupt is triggered at start of hsync,
868 * just prior to the first active line of vblank. However we
869 * consider lines to start at the leading edge of horizontal
870 * active. So, should we get here before we've crossed into
871 * the horizontal active of the first line in vblank, we would
872 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
873 * always add htotal-hsync_start to the current pixel position.
874 */
875 position = (position + htotal - hsync_start) % vtotal;
876 }
877
878 /* Get optional system timestamp after query. */
879 if (etime)
880 *etime = ktime_get();
881
882 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
883
884 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
885
886 in_vbl = position >= vbl_start && position < vbl_end;
887
888 /*
889 * While in vblank, position will be negative
890 * counting up towards 0 at vbl_end. And outside
891 * vblank, position will be positive counting
892 * up from vbl_end.
893 */
894 if (position >= vbl_start)
895 position -= vbl_end;
896 else
897 position += vtotal - vbl_end;
898
899 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
900 *vpos = position;
901 *hpos = 0;
902 } else {
903 *vpos = position / htotal;
904 *hpos = position - (*vpos * htotal);
905 }
906
907 /* In vblank? */
908 if (in_vbl)
909 ret |= DRM_SCANOUTPOS_IN_VBLANK;
910
911 return ret;
912 }
913
914 int intel_get_crtc_scanline(struct intel_crtc *crtc)
915 {
916 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
917 unsigned long irqflags;
918 int position;
919
920 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
921 position = __intel_get_crtc_scanline(crtc);
922 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
923
924 return position;
925 }
926
927 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
928 int *max_error,
929 struct timeval *vblank_time,
930 unsigned flags)
931 {
932 struct drm_crtc *crtc;
933
934 if (pipe >= INTEL_INFO(dev)->num_pipes) {
935 DRM_ERROR("Invalid crtc %u\n", pipe);
936 return -EINVAL;
937 }
938
939 /* Get drm_crtc to timestamp: */
940 crtc = intel_get_crtc_for_pipe(dev, pipe);
941 if (crtc == NULL) {
942 DRM_ERROR("Invalid crtc %u\n", pipe);
943 return -EINVAL;
944 }
945
946 if (!crtc->hwmode.crtc_clock) {
947 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
948 return -EBUSY;
949 }
950
951 /* Helper routine in DRM core does all the work: */
952 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
953 vblank_time, flags,
954 &crtc->hwmode);
955 }
956
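/*
 * ILK RPS change interrupt: compare the hardware busy-up/busy-down
 * averages against the max/min thresholds and step the DRPS delay one
 * notch in the direction the hardware is asking for.
 */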
957 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
958 {
959 struct drm_i915_private *dev_priv = dev->dev_private;
960 u32 busy_up, busy_down, max_avg, min_avg;
961 u8 new_delay;
962
963 spin_lock(&mchdev_lock);
964
965 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
966
967 new_delay = dev_priv->ips.cur_delay;
968
969 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
970 busy_up = I915_READ(RCPREVBSYTUPAVG);
971 busy_down = I915_READ(RCPREVBSYTDNAVG);
972 max_avg = I915_READ(RCBMAXAVG);
973 min_avg = I915_READ(RCBMINAVG);
974
975 /* Handle RCS change request from hw */
976 if (busy_up > max_avg) {
977 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
978 new_delay = dev_priv->ips.cur_delay - 1;
979 if (new_delay < dev_priv->ips.max_delay)
980 new_delay = dev_priv->ips.max_delay;
981 } else if (busy_down < min_avg) {
982 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
983 new_delay = dev_priv->ips.cur_delay + 1;
984 if (new_delay > dev_priv->ips.min_delay)
985 new_delay = dev_priv->ips.min_delay;
986 }
987
988 if (ironlake_set_drps(dev, new_delay))
989 dev_priv->ips.cur_delay = new_delay;
990
991 spin_unlock(&mchdev_lock);
992
993 return;
994 }
995
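/* Wake anyone waiting on @ring's irq_queue for request completion. */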
996 static void notify_ring(struct intel_engine_cs *ring)
997 {
998 #ifdef __NetBSD__
999 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1000 unsigned long flags;
1001 #endif
1002
1003 if (!intel_ring_initialized(ring))
1004 return;
1005
1006 trace_i915_gem_request_notify(ring);
1007
1008 #ifdef __NetBSD__
1009 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1010 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
1011 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1012 #else
1013 wake_up_all(&ring->irq_queue);
1014 #endif
1015 }
1016
1017 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1018 struct intel_rps_ei *ei)
1019 {
1020 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1021 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1022 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1023 }
1024
1025 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1026 {
1027 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
1028 }
1029
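/*
 * VLV/CHV C0 residency workaround: when the EI-expired interrupt fires,
 * sample the render and media C0 counters, compare the combined busy time
 * over the interval against the up/down thresholds, and synthesize the
 * corresponding RP up/down threshold event in software.
 */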
1030 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1031 {
1032 const struct intel_rps_ei *prev = &dev_priv->rps.ei;
1033 struct intel_rps_ei now;
1034 u32 events = 0;
1035
1036 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1037 return 0;
1038
1039 vlv_c0_read(dev_priv, &now);
1040 if (now.cz_clock == 0)
1041 return 0;
1042
1043 if (prev->cz_clock) {
1044 u64 time, c0;
1045 unsigned int mul;
1046
1047 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
1048 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1049 mul <<= 8;
1050
1051 time = now.cz_clock - prev->cz_clock;
1052 time *= dev_priv->czclk_freq;
1053
1054 /* Workload can be split between render + media,
1055 * e.g. SwapBuffers being blitted in X after being rendered in
1056 * mesa. To account for this we need to combine both engines
1057 * into our activity counter.
1058 */
1059 c0 = now.render_c0 - prev->render_c0;
1060 c0 += now.media_c0 - prev->media_c0;
1061 c0 *= mul;
1062
1063 if (c0 > time * dev_priv->rps.up_threshold)
1064 events = GEN6_PM_RP_UP_THRESHOLD;
1065 else if (c0 < time * dev_priv->rps.down_threshold)
1066 events = GEN6_PM_RP_DOWN_THRESHOLD;
1067 }
1068
1069 dev_priv->rps.ei = now;
1070 return events;
1071 }
1072
1073 static bool any_waiters(struct drm_i915_private *dev_priv)
1074 {
1075 struct intel_engine_cs *ring;
1076 int i;
1077
1078 for_each_ring(ring, dev_priv, i)
1079 if (ring->irq_refcount)
1080 return true;
1081
1082 return false;
1083 }
1084
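/*
 * Deferred RPS work: runs from the driver workqueue, picks up the PM IIR
 * bits stashed by the interrupt handler, and moves the GPU frequency up
 * or down between the soft limits accordingly.
 */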
1085 static void gen6_pm_rps_work(struct work_struct *work)
1086 {
1087 struct drm_i915_private *dev_priv =
1088 container_of(work, struct drm_i915_private, rps.work);
1089 bool client_boost;
1090 int new_delay, adj, min, max;
1091 u32 pm_iir;
1092
1093 spin_lock_irq(&dev_priv->irq_lock);
1094 /* Speed up work cancellation while disabling RPS interrupts. */
1095 if (!dev_priv->rps.interrupts_enabled) {
1096 spin_unlock_irq(&dev_priv->irq_lock);
1097 return;
1098 }
1099 pm_iir = dev_priv->rps.pm_iir;
1100 dev_priv->rps.pm_iir = 0;
1101 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1102 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1103 client_boost = dev_priv->rps.client_boost;
1104 dev_priv->rps.client_boost = false;
1105 spin_unlock_irq(&dev_priv->irq_lock);
1106
1107 /* Make sure we didn't queue anything we're not going to process. */
1108 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1109
1110 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1111 return;
1112
1113 mutex_lock(&dev_priv->rps.hw_lock);
1114
1115 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1116
1117 adj = dev_priv->rps.last_adj;
1118 new_delay = dev_priv->rps.cur_freq;
1119 min = dev_priv->rps.min_freq_softlimit;
1120 max = dev_priv->rps.max_freq_softlimit;
1121
1122 if (client_boost) {
1123 new_delay = dev_priv->rps.max_freq_softlimit;
1124 adj = 0;
1125 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1126 if (adj > 0)
1127 adj *= 2;
1128 else /* CHV needs even encode values */
1129 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1130 /*
1131 * For better performance, jump directly
1132 * to RPe if we're below it.
1133 */
1134 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1135 new_delay = dev_priv->rps.efficient_freq;
1136 adj = 0;
1137 }
1138 } else if (any_waiters(dev_priv)) {
1139 adj = 0;
1140 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1141 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1142 new_delay = dev_priv->rps.efficient_freq;
1143 else
1144 new_delay = dev_priv->rps.min_freq_softlimit;
1145 adj = 0;
1146 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1147 if (adj < 0)
1148 adj *= 2;
1149 else /* CHV needs even encode values */
1150 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1151 } else { /* unknown event */
1152 adj = 0;
1153 }
1154
1155 dev_priv->rps.last_adj = adj;
1156
1157 /* sysfs frequency interfaces may have snuck in while servicing the
1158 * interrupt
1159 */
1160 new_delay += adj;
1161 new_delay = clamp_t(int, new_delay, min, max);
1162
1163 intel_set_rps(dev_priv->dev, new_delay);
1164
1165 mutex_unlock(&dev_priv->rps.hw_lock);
1166 }
1167
1168
1169 /**
1170 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1171 * occurred.
1172 * @work: workqueue struct
1173 *
1174 * Doesn't actually do anything except notify userspace. As a consequence of
1175 * this event, userspace should try to remap the bad rows since statistically
1176 * the same row is likely to go bad again.
1177 */
1178 static void ivybridge_parity_work(struct work_struct *work)
1179 {
1180 struct drm_i915_private *dev_priv =
1181 container_of(work, struct drm_i915_private, l3_parity.error_work);
1182 u32 error_status, row, bank, subbank;
1183 #ifndef __NetBSD__ /* XXX kobject uevent...? */
1184 char *parity_event[6];
1185 #endif
1186 uint32_t misccpctl;
1187 uint8_t slice = 0;
1188
1189 /* We must turn off DOP level clock gating to access the L3 registers.
1190 * In order to prevent a get/put style interface, acquire struct mutex
1191 * any time we access those registers.
1192 */
1193 mutex_lock(&dev_priv->dev->struct_mutex);
1194
1195 /* If we've screwed up tracking, just let the interrupt fire again */
1196 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1197 goto out;
1198
1199 misccpctl = I915_READ(GEN7_MISCCPCTL);
1200 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1201 POSTING_READ(GEN7_MISCCPCTL);
1202
1203 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1204 u32 reg;
1205
1206 slice--;
1207 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1208 break;
1209
1210 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1211
1212 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1213
1214 error_status = I915_READ(reg);
1215 row = GEN7_PARITY_ERROR_ROW(error_status);
1216 bank = GEN7_PARITY_ERROR_BANK(error_status);
1217 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1218
1219 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1220 POSTING_READ(reg);
1221
1222 #ifndef __NetBSD__ /* XXX kobject uevent...? */
1223 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1224 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1225 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1226 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1227 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1228 parity_event[5] = NULL;
1229
1230 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1231 KOBJ_CHANGE, parity_event);
1232 #endif
1233
1234 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1235 slice, row, bank, subbank);
1236
1237 #ifndef __NetBSD__ /* XXX kobject uevent...? */
1238 kfree(parity_event[4]);
1239 kfree(parity_event[3]);
1240 kfree(parity_event[2]);
1241 kfree(parity_event[1]);
1242 #endif
1243 }
1244
1245 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1246
1247 out:
1248 WARN_ON(dev_priv->l3_parity.which_slice);
1249 spin_lock_irq(&dev_priv->irq_lock);
1250 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1251 spin_unlock_irq(&dev_priv->irq_lock);
1252
1253 mutex_unlock(&dev_priv->dev->struct_mutex);
1254 }
1255
1256 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1257 {
1258 struct drm_i915_private *dev_priv = dev->dev_private;
1259
1260 if (!HAS_L3_DPF(dev))
1261 return;
1262
1263 spin_lock(&dev_priv->irq_lock);
1264 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1265 spin_unlock(&dev_priv->irq_lock);
1266
1267 iir &= GT_PARITY_ERROR(dev);
1268 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1269 dev_priv->l3_parity.which_slice |= 1 << 1;
1270
1271 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1272 dev_priv->l3_parity.which_slice |= 1 << 0;
1273
1274 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1275 }
1276
1277 static void ilk_gt_irq_handler(struct drm_device *dev,
1278 struct drm_i915_private *dev_priv,
1279 u32 gt_iir)
1280 {
1281 if (gt_iir &
1282 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1283 notify_ring(&dev_priv->ring[RCS]);
1284 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1285 notify_ring(&dev_priv->ring[VCS]);
1286 }
1287
1288 static void snb_gt_irq_handler(struct drm_device *dev,
1289 struct drm_i915_private *dev_priv,
1290 u32 gt_iir)
1291 {
1292
1293 if (gt_iir &
1294 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1295 notify_ring(&dev_priv->ring[RCS]);
1296 if (gt_iir & GT_BSD_USER_INTERRUPT)
1297 notify_ring(&dev_priv->ring[VCS]);
1298 if (gt_iir & GT_BLT_USER_INTERRUPT)
1299 notify_ring(&dev_priv->ring[BCS]);
1300
1301 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1302 GT_BSD_CS_ERROR_INTERRUPT |
1303 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1304 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1305
1306 if (gt_iir & GT_PARITY_ERROR(dev))
1307 ivybridge_parity_error_irq_handler(dev, gt_iir);
1308 }
1309
1310 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1311 u32 master_ctl)
1312 {
1313 irqreturn_t ret = IRQ_NONE;
1314
1315 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1316 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1317 if (tmp) {
1318 I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1319 ret = IRQ_HANDLED;
1320
1321 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1322 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1323 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1324 notify_ring(&dev_priv->ring[RCS]);
1325
1326 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1327 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1328 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1329 notify_ring(&dev_priv->ring[BCS]);
1330 } else
1331 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1332 }
1333
1334 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1335 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1336 if (tmp) {
1337 I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1338 ret = IRQ_HANDLED;
1339
1340 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1341 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1342 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1343 notify_ring(&dev_priv->ring[VCS]);
1344
1345 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1346 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1347 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1348 notify_ring(&dev_priv->ring[VCS2]);
1349 } else
1350 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1351 }
1352
1353 if (master_ctl & GEN8_GT_VECS_IRQ) {
1354 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1355 if (tmp) {
1356 I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1357 ret = IRQ_HANDLED;
1358
1359 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1360 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1361 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1362 notify_ring(&dev_priv->ring[VECS]);
1363 } else
1364 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1365 }
1366
1367 if (master_ctl & GEN8_GT_PM_IRQ) {
1368 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1369 if (tmp & dev_priv->pm_rps_events) {
1370 I915_WRITE_FW(GEN8_GT_IIR(2),
1371 tmp & dev_priv->pm_rps_events);
1372 ret = IRQ_HANDLED;
1373 gen6_rps_irq_handler(dev_priv, tmp);
1374 } else
1375 DRM_ERROR("The master control interrupt lied (PM)!\n");
1376 }
1377
1378 return ret;
1379 }
1380
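/*
 * The *_long_detect() helpers below decode a platform's hotplug register
 * value and report whether the pulse on @port was a long pulse (typically
 * a real connect/disconnect rather than a short HPD IRQ).
 */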
1381 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1382 {
1383 switch (port) {
1384 case PORT_A:
1385 return val & PORTA_HOTPLUG_LONG_DETECT;
1386 case PORT_B:
1387 return val & PORTB_HOTPLUG_LONG_DETECT;
1388 case PORT_C:
1389 return val & PORTC_HOTPLUG_LONG_DETECT;
1390 default:
1391 return false;
1392 }
1393 }
1394
1395 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1396 {
1397 switch (port) {
1398 case PORT_E:
1399 return val & PORTE_HOTPLUG_LONG_DETECT;
1400 default:
1401 return false;
1402 }
1403 }
1404
1405 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1406 {
1407 switch (port) {
1408 case PORT_A:
1409 return val & PORTA_HOTPLUG_LONG_DETECT;
1410 case PORT_B:
1411 return val & PORTB_HOTPLUG_LONG_DETECT;
1412 case PORT_C:
1413 return val & PORTC_HOTPLUG_LONG_DETECT;
1414 case PORT_D:
1415 return val & PORTD_HOTPLUG_LONG_DETECT;
1416 default:
1417 return false;
1418 }
1419 }
1420
1421 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1422 {
1423 switch (port) {
1424 case PORT_A:
1425 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1426 default:
1427 return false;
1428 }
1429 }
1430
1431 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1432 {
1433 switch (port) {
1434 case PORT_B:
1435 return val & PORTB_HOTPLUG_LONG_DETECT;
1436 case PORT_C:
1437 return val & PORTC_HOTPLUG_LONG_DETECT;
1438 case PORT_D:
1439 return val & PORTD_HOTPLUG_LONG_DETECT;
1440 default:
1441 return false;
1442 }
1443 }
1444
1445 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1446 {
1447 switch (port) {
1448 case PORT_B:
1449 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1450 case PORT_C:
1451 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1452 case PORT_D:
1453 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1454 default:
1455 return false;
1456 }
1457 }
1458
1459 /*
1460 * Get a bit mask of pins that have triggered, and which ones may be long.
1461 * This can be called multiple times with the same masks to accumulate
1462 * hotplug detection results from several registers.
1463 *
1464 * Note that the caller is expected to zero out the masks initially.
1465 */
1466 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1467 u32 hotplug_trigger, u32 dig_hotplug_reg,
1468 const u32 hpd[HPD_NUM_PINS],
1469 bool long_pulse_detect(enum port port, u32 val))
1470 {
1471 enum port port;
1472 int i;
1473
1474 for_each_hpd_pin(i) {
1475 if ((hpd[i] & hotplug_trigger) == 0)
1476 continue;
1477
1478 *pin_mask |= BIT(i);
1479
1480 if (!intel_hpd_pin_to_port(i, &port))
1481 continue;
1482
1483 if (long_pulse_detect(port, dig_hotplug_reg))
1484 *long_mask |= BIT(i);
1485 }
1486
1487 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1488 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1489
1490 }
1491
1492 static void gmbus_irq_handler(struct drm_device *dev)
1493 {
1494 struct drm_i915_private *dev_priv = dev->dev_private;
1495
1496 #ifdef __NetBSD__
1497 spin_lock(&dev_priv->gmbus_wait_lock);
1498 DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
1499 &dev_priv->gmbus_wait_lock);
1500 spin_unlock(&dev_priv->gmbus_wait_lock);
1501 #else
1502 wake_up_all(&dev_priv->gmbus_wait_queue);
1503 #endif
1504 }
1505
1506 static void dp_aux_irq_handler(struct drm_device *dev)
1507 {
1508 struct drm_i915_private *dev_priv = dev->dev_private;
1509
1510 #ifdef __NetBSD__
1511 spin_lock(&dev_priv->gmbus_wait_lock);
1512 DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
1513 &dev_priv->gmbus_wait_lock);
1514 spin_unlock(&dev_priv->gmbus_wait_lock);
1515 #else
1516 wake_up_all(&dev_priv->gmbus_wait_queue);
1517 #endif
1518 }
1519
1520 #if defined(CONFIG_DEBUG_FS)
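/*
 * Append one CRC result to the pipe's CRC ring buffer for debugfs readers
 * and wake them; if the buffer is full the result is dropped with an error.
 */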
1521 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1522 uint32_t crc0, uint32_t crc1,
1523 uint32_t crc2, uint32_t crc3,
1524 uint32_t crc4)
1525 {
1526 struct drm_i915_private *dev_priv = dev->dev_private;
1527 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1528 struct intel_pipe_crc_entry *entry;
1529 int head, tail;
1530
1531 spin_lock(&pipe_crc->lock);
1532
1533 if (!pipe_crc->entries) {
1534 spin_unlock(&pipe_crc->lock);
1535 DRM_DEBUG_KMS("spurious interrupt\n");
1536 return;
1537 }
1538
1539 head = pipe_crc->head;
1540 tail = pipe_crc->tail;
1541
1542 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1543 spin_unlock(&pipe_crc->lock);
1544 DRM_ERROR("CRC buffer overflowing\n");
1545 return;
1546 }
1547
1548 entry = &pipe_crc->entries[head];
1549
1550 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1551 entry->crc[0] = crc0;
1552 entry->crc[1] = crc1;
1553 entry->crc[2] = crc2;
1554 entry->crc[3] = crc3;
1555 entry->crc[4] = crc4;
1556
1557 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1558 pipe_crc->head = head;
1559
1560 spin_unlock(&pipe_crc->lock);
1561
1562 wake_up_interruptible(&pipe_crc->wq);
1563 }
1564 #else
1565 static inline void
1566 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1567 uint32_t crc0, uint32_t crc1,
1568 uint32_t crc2, uint32_t crc3,
1569 uint32_t crc4) {}
1570 #endif
1571
1572
1573 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1574 {
1575 struct drm_i915_private *dev_priv = dev->dev_private;
1576
1577 display_pipe_crc_irq_handler(dev, pipe,
1578 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1579 0, 0, 0, 0);
1580 }
1581
1582 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1583 {
1584 struct drm_i915_private *dev_priv = dev->dev_private;
1585
1586 display_pipe_crc_irq_handler(dev, pipe,
1587 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1588 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1589 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1590 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1591 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1592 }
1593
1594 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1595 {
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597 uint32_t res1, res2;
1598
1599 if (INTEL_INFO(dev)->gen >= 3)
1600 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1601 else
1602 res1 = 0;
1603
1604 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1605 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1606 else
1607 res2 = 0;
1608
1609 display_pipe_crc_irq_handler(dev, pipe,
1610 I915_READ(PIPE_CRC_RES_RED(pipe)),
1611 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1612 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1613 res1, res2);
1614 }
1615
1616 /* The RPS events need forcewake, so we add them to a work queue and mask their
1617 * IMR bits until the work is done. Other interrupts can be processed without
1618 * the work queue. */
1619 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1620 {
1621 if (pm_iir & dev_priv->pm_rps_events) {
1622 spin_lock(&dev_priv->irq_lock);
1623 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1624 if (dev_priv->rps.interrupts_enabled) {
1625 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1626 queue_work(dev_priv->wq, &dev_priv->rps.work);
1627 }
1628 spin_unlock(&dev_priv->irq_lock);
1629 }
1630
1631 if (INTEL_INFO(dev_priv)->gen >= 8)
1632 return;
1633
1634 if (HAS_VEBOX(dev_priv->dev)) {
1635 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1636 notify_ring(&dev_priv->ring[VECS]);
1637
1638 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1639 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1640 }
1641 }
1642
1643 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1644 {
1645 if (!drm_handle_vblank(dev, pipe))
1646 return false;
1647
1648 return true;
1649 }
1650
1651 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1652 {
1653 struct drm_i915_private *dev_priv = dev->dev_private;
1654 u32 pipe_stats[I915_MAX_PIPES] = { };
1655 int pipe;
1656
1657 spin_lock(&dev_priv->irq_lock);
1658 for_each_pipe(dev_priv, pipe) {
1659 int reg;
1660 u32 mask, iir_bit = 0;
1661
1662 /*
1663 * PIPESTAT bits get signalled even when the interrupt is
1664 * disabled with the mask bits, and some of the status bits do
1665 * not generate interrupts at all (like the underrun bit). Hence
1666 * we need to be careful that we only handle what we want to
1667 * handle.
1668 */
1669
1670 /* fifo underruns are filtered in the underrun handler. */
1671 mask = PIPE_FIFO_UNDERRUN_STATUS;
1672
1673 switch (pipe) {
1674 case PIPE_A:
1675 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1676 break;
1677 case PIPE_B:
1678 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1679 break;
1680 case PIPE_C:
1681 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1682 break;
1683 }
1684 if (iir & iir_bit)
1685 mask |= dev_priv->pipestat_irq_mask[pipe];
1686
1687 if (!mask)
1688 continue;
1689
1690 reg = PIPESTAT(pipe);
1691 mask |= PIPESTAT_INT_ENABLE_MASK;
1692 pipe_stats[pipe] = I915_READ(reg) & mask;
1693
1694 /*
1695 * Clear the PIPE*STAT regs before the IIR
1696 */
1697 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1698 PIPESTAT_INT_STATUS_MASK))
1699 I915_WRITE(reg, pipe_stats[pipe]);
1700 }
1701 spin_unlock(&dev_priv->irq_lock);
1702
1703 for_each_pipe(dev_priv, pipe) {
1704 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1705 intel_pipe_handle_vblank(dev, pipe))
1706 intel_check_page_flip(dev, pipe);
1707
1708 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1709 intel_prepare_page_flip(dev, pipe);
1710 intel_finish_page_flip(dev, pipe);
1711 }
1712
1713 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1714 i9xx_pipe_crc_irq_handler(dev, pipe);
1715
1716 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1717 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1718 }
1719
1720 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1721 gmbus_irq_handler(dev);
1722 }
1723
1724 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1725 {
1726 struct drm_i915_private *dev_priv = dev->dev_private;
1727 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1728 u32 pin_mask = 0, long_mask = 0;
1729
1730 if (!hotplug_status)
1731 return;
1732
1733 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1734 /*
1735 * Make sure hotplug status is cleared before we clear IIR, or else we
1736 * may miss hotplug events.
1737 */
1738 POSTING_READ(PORT_HOTPLUG_STAT);
1739
1740 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1741 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1742
1743 if (hotplug_trigger) {
1744 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1745 hotplug_trigger, hpd_status_g4x,
1746 i9xx_port_hotplug_long_detect);
1747
1748 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1749 }
1750
1751 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1752 dp_aux_irq_handler(dev);
1753 } else {
1754 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1755
1756 if (hotplug_trigger) {
1757 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1758 hotplug_trigger, hpd_status_i915,
1759 i9xx_port_hotplug_long_detect);
1760 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1761 }
1762 }
1763 }
1764
1765 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
1766 {
1767 struct drm_device *dev = arg;
1768 struct drm_i915_private *dev_priv = dev->dev_private;
1769 u32 iir, gt_iir, pm_iir;
1770 irqreturn_t ret = IRQ_NONE;
1771
1772 if (!intel_irqs_enabled(dev_priv))
1773 return IRQ_NONE;
1774
1775 while (true) {
1776 /* Find, clear, then process each source of interrupt */
1777
1778 gt_iir = I915_READ(GTIIR);
1779 if (gt_iir)
1780 I915_WRITE(GTIIR, gt_iir);
1781
1782 pm_iir = I915_READ(GEN6_PMIIR);
1783 if (pm_iir)
1784 I915_WRITE(GEN6_PMIIR, pm_iir);
1785
1786 iir = I915_READ(VLV_IIR);
1787 if (iir) {
1788 /* Consume port before clearing IIR or we'll miss events */
1789 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1790 i9xx_hpd_irq_handler(dev);
1791 I915_WRITE(VLV_IIR, iir);
1792 }
1793
1794 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1795 goto out;
1796
1797 ret = IRQ_HANDLED;
1798
1799 if (gt_iir)
1800 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1801 if (pm_iir)
1802 gen6_rps_irq_handler(dev_priv, pm_iir);
1803 /* Call regardless, as some status bits might not be
1804 * signalled in iir */
1805 valleyview_pipestat_irq_handler(dev, iir);
1806 }
1807
1808 out:
1809 return ret;
1810 }
1811
1812 static irqreturn_t cherryview_irq_handler(DRM_IRQ_ARGS)
1813 {
1814 struct drm_device *dev = arg;
1815 struct drm_i915_private *dev_priv = dev->dev_private;
1816 u32 master_ctl, iir;
1817 irqreturn_t ret = IRQ_NONE;
1818
1819 if (!intel_irqs_enabled(dev_priv))
1820 return IRQ_NONE;
1821
1822 for (;;) {
1823 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1824 iir = I915_READ(VLV_IIR);
1825
1826 if (master_ctl == 0 && iir == 0)
1827 break;
1828
1829 ret = IRQ_HANDLED;
1830
1831 I915_WRITE(GEN8_MASTER_IRQ, 0);
1832
1833 /* Find, clear, then process each source of interrupt */
1834
1835 if (iir) {
1836 /* Consume port before clearing IIR or we'll miss events */
1837 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1838 i9xx_hpd_irq_handler(dev);
1839 I915_WRITE(VLV_IIR, iir);
1840 }
1841
1842 gen8_gt_irq_handler(dev_priv, master_ctl);
1843
1844 /* Call regardless, as some status bits might not be
1845 * signalled in iir */
1846 valleyview_pipestat_irq_handler(dev, iir);
1847
1848 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1849 POSTING_READ(GEN8_MASTER_IRQ);
1850 }
1851
1852 return ret;
1853 }
1854
1855 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1856 const u32 hpd[HPD_NUM_PINS])
1857 {
1858 struct drm_i915_private *dev_priv = to_i915(dev);
1859 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1860
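	/*
	 * Snapshot the digital hotplug register and write the value straight
	 * back: the status bits in it are sticky, so the write-back apparently
	 * acks them, while the snapshot is decoded for long/short pulses below.
	 */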
1861 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1862 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1863
1864 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1865 dig_hotplug_reg, hpd,
1866 pch_port_hotplug_long_detect);
1867
1868 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1869 }
1870
1871 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1872 {
1873 struct drm_i915_private *dev_priv = dev->dev_private;
1874 int pipe;
1875 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1876
1877 if (hotplug_trigger)
1878 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1879
1880 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1881 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1882 SDE_AUDIO_POWER_SHIFT);
1883 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1884 port_name(port));
1885 }
1886
1887 if (pch_iir & SDE_AUX_MASK)
1888 dp_aux_irq_handler(dev);
1889
1890 if (pch_iir & SDE_GMBUS)
1891 gmbus_irq_handler(dev);
1892
1893 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1894 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1895
1896 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1897 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1898
1899 if (pch_iir & SDE_POISON)
1900 DRM_ERROR("PCH poison interrupt\n");
1901
1902 if (pch_iir & SDE_FDI_MASK)
1903 for_each_pipe(dev_priv, pipe)
1904 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1905 pipe_name(pipe),
1906 I915_READ(FDI_RX_IIR(pipe)));
1907
1908 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1909 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1910
1911 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1912 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1913
1914 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1915 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1916
1917 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1918 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1919 }
1920
1921 static void ivb_err_int_handler(struct drm_device *dev)
1922 {
1923 struct drm_i915_private *dev_priv = dev->dev_private;
1924 u32 err_int = I915_READ(GEN7_ERR_INT);
1925 enum i915_pipe pipe;
1926
1927 if (err_int & ERR_INT_POISON)
1928 DRM_ERROR("Poison interrupt\n");
1929
1930 for_each_pipe(dev_priv, pipe) {
1931 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1932 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1933
1934 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1935 if (IS_IVYBRIDGE(dev))
1936 ivb_pipe_crc_irq_handler(dev, pipe);
1937 else
1938 hsw_pipe_crc_irq_handler(dev, pipe);
1939 }
1940 }
1941
1942 I915_WRITE(GEN7_ERR_INT, err_int);
1943 }
1944
1945 static void cpt_serr_int_handler(struct drm_device *dev)
1946 {
1947 struct drm_i915_private *dev_priv = dev->dev_private;
1948 u32 serr_int = I915_READ(SERR_INT);
1949
1950 if (serr_int & SERR_INT_POISON)
1951 DRM_ERROR("PCH poison interrupt\n");
1952
1953 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1954 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1955
1956 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1957 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1958
1959 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1960 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1961
1962 I915_WRITE(SERR_INT, serr_int);
1963 }
1964
1965 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1966 {
1967 struct drm_i915_private *dev_priv = dev->dev_private;
1968 int pipe;
1969 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1970
1971 if (hotplug_trigger)
1972 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1973
1974 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1975 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1976 SDE_AUDIO_POWER_SHIFT_CPT);
1977 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1978 port_name(port));
1979 }
1980
1981 if (pch_iir & SDE_AUX_MASK_CPT)
1982 dp_aux_irq_handler(dev);
1983
1984 if (pch_iir & SDE_GMBUS_CPT)
1985 gmbus_irq_handler(dev);
1986
1987 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1988 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1989
1990 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1991 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1992
1993 if (pch_iir & SDE_FDI_MASK_CPT)
1994 for_each_pipe(dev_priv, pipe)
1995 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1996 pipe_name(pipe),
1997 I915_READ(FDI_RX_IIR(pipe)));
1998
1999 if (pch_iir & SDE_ERROR_CPT)
2000 cpt_serr_int_handler(dev);
2001 }
2002
2003 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2004 {
2005 struct drm_i915_private *dev_priv = dev->dev_private;
2006 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2007 ~SDE_PORTE_HOTPLUG_SPT;
2008 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2009 u32 pin_mask = 0, long_mask = 0;
2010
2011 if (hotplug_trigger) {
2012 u32 dig_hotplug_reg;
2013
2014 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2015 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2016
2017 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2018 dig_hotplug_reg, hpd_spt,
2019 spt_port_hotplug_long_detect);
2020 }
2021
2022 if (hotplug2_trigger) {
2023 u32 dig_hotplug_reg;
2024
2025 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2026 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2027
2028 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2029 dig_hotplug_reg, hpd_spt,
2030 spt_port_hotplug2_long_detect);
2031 }
2032
2033 if (pin_mask)
2034 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2035
2036 if (pch_iir & SDE_GMBUS_CPT)
2037 gmbus_irq_handler(dev);
2038 }
2039
2040 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2041 const u32 hpd[HPD_NUM_PINS])
2042 {
2043 struct drm_i915_private *dev_priv = to_i915(dev);
2044 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2045
2046 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2047 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2048
2049 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2050 dig_hotplug_reg, hpd,
2051 ilk_port_hotplug_long_detect);
2052
2053 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2054 }
2055
2056 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2057 {
2058 struct drm_i915_private *dev_priv = dev->dev_private;
2059 enum i915_pipe pipe;
2060 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2061
2062 if (hotplug_trigger)
2063 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2064
2065 if (de_iir & DE_AUX_CHANNEL_A)
2066 dp_aux_irq_handler(dev);
2067
2068 if (de_iir & DE_GSE)
2069 intel_opregion_asle_intr(dev);
2070
2071 if (de_iir & DE_POISON)
2072 DRM_ERROR("Poison interrupt\n");
2073
2074 for_each_pipe(dev_priv, pipe) {
2075 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2076 intel_pipe_handle_vblank(dev, pipe))
2077 intel_check_page_flip(dev, pipe);
2078
2079 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2080 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2081
2082 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2083 i9xx_pipe_crc_irq_handler(dev, pipe);
2084
2085 /* plane/pipes map 1:1 on ilk+ */
2086 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2087 intel_prepare_page_flip(dev, pipe);
2088 intel_finish_page_flip_plane(dev, pipe);
2089 }
2090 }
2091
2092 /* check event from PCH */
2093 if (de_iir & DE_PCH_EVENT) {
2094 u32 pch_iir = I915_READ(SDEIIR);
2095
2096 if (HAS_PCH_CPT(dev))
2097 cpt_irq_handler(dev, pch_iir);
2098 else
2099 ibx_irq_handler(dev, pch_iir);
2100
2101 /* should clear PCH hotplug event before clearing CPU irq */
2102 I915_WRITE(SDEIIR, pch_iir);
2103 }
2104
2105 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2106 ironlake_rps_change_irq_handler(dev);
2107 }
2108
2109 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2110 {
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112 enum i915_pipe pipe;
2113 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2114
2115 if (hotplug_trigger)
2116 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2117
2118 if (de_iir & DE_ERR_INT_IVB)
2119 ivb_err_int_handler(dev);
2120
2121 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2122 dp_aux_irq_handler(dev);
2123
2124 if (de_iir & DE_GSE_IVB)
2125 intel_opregion_asle_intr(dev);
2126
2127 for_each_pipe(dev_priv, pipe) {
2128 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2129 intel_pipe_handle_vblank(dev, pipe))
2130 intel_check_page_flip(dev, pipe);
2131
2132 /* plane/pipes map 1:1 on ilk+ */
2133 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2134 intel_prepare_page_flip(dev, pipe);
2135 intel_finish_page_flip_plane(dev, pipe);
2136 }
2137 }
2138
2139 /* check event from PCH */
2140 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2141 u32 pch_iir = I915_READ(SDEIIR);
2142
2143 cpt_irq_handler(dev, pch_iir);
2144
2145 /* clear PCH hotplug event before clearing CPU irq */
2146 I915_WRITE(SDEIIR, pch_iir);
2147 }
2148 }
2149
2150 /*
2151 * To handle irqs with the minimum potential races with fresh interrupts, we:
2152 * 1 - Disable Master Interrupt Control.
2153 * 2 - Find the source(s) of the interrupt.
2154 * 3 - Clear the Interrupt Identity bits (IIR).
2155 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2156 * 5 - Re-enable Master Interrupt Control.
2157 */
2158 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
2159 {
2160 struct drm_device *dev = arg;
2161 struct drm_i915_private *dev_priv = dev->dev_private;
2162 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2163 irqreturn_t ret = IRQ_NONE;
2164
2165 if (!intel_irqs_enabled(dev_priv))
2166 return IRQ_NONE;
2167
2168 /* We get interrupts on unclaimed registers, so check for this before we
2169 * do any I915_{READ,WRITE}. */
2170 intel_uncore_check_errors(dev);
2171
2172 /* disable master interrupt before clearing iir */
2173 de_ier = I915_READ(DEIER);
2174 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2175 POSTING_READ(DEIER);
2176
2177 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2178 * interrupts will be stored on its back queue, and then we'll be
2179 * able to process them after we restore SDEIER (as soon as we restore
2180 * it, we'll get an interrupt if SDEIIR still has something to process
2181 * due to its back queue). */
2182 if (!HAS_PCH_NOP(dev)) {
2183 sde_ier = I915_READ(SDEIER);
2184 I915_WRITE(SDEIER, 0);
2185 POSTING_READ(SDEIER);
2186 }
2187
2188 /* Find, clear, then process each source of interrupt */
2189
2190 gt_iir = I915_READ(GTIIR);
2191 if (gt_iir) {
2192 I915_WRITE(GTIIR, gt_iir);
2193 ret = IRQ_HANDLED;
2194 if (INTEL_INFO(dev)->gen >= 6)
2195 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2196 else
2197 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2198 }
2199
2200 de_iir = I915_READ(DEIIR);
2201 if (de_iir) {
2202 I915_WRITE(DEIIR, de_iir);
2203 ret = IRQ_HANDLED;
2204 if (INTEL_INFO(dev)->gen >= 7)
2205 ivb_display_irq_handler(dev, de_iir);
2206 else
2207 ilk_display_irq_handler(dev, de_iir);
2208 }
2209
2210 if (INTEL_INFO(dev)->gen >= 6) {
2211 u32 pm_iir = I915_READ(GEN6_PMIIR);
2212 if (pm_iir) {
2213 I915_WRITE(GEN6_PMIIR, pm_iir);
2214 ret = IRQ_HANDLED;
2215 gen6_rps_irq_handler(dev_priv, pm_iir);
2216 }
2217 }
2218
2219 I915_WRITE(DEIER, de_ier);
2220 POSTING_READ(DEIER);
2221 if (!HAS_PCH_NOP(dev)) {
2222 I915_WRITE(SDEIER, sde_ier);
2223 POSTING_READ(SDEIER);
2224 }
2225
2226 return ret;
2227 }
2228
2229 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2230 const u32 hpd[HPD_NUM_PINS])
2231 {
2232 struct drm_i915_private *dev_priv = to_i915(dev);
2233 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2234
2235 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2236 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2237
2238 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2239 dig_hotplug_reg, hpd,
2240 bxt_port_hotplug_long_detect);
2241
2242 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2243 }
2244
2245 static irqreturn_t gen8_irq_handler(DRM_IRQ_ARGS)
2246 {
2247 struct drm_device *dev = arg;
2248 struct drm_i915_private *dev_priv = dev->dev_private;
2249 u32 master_ctl;
2250 irqreturn_t ret = IRQ_NONE;
2251 uint32_t tmp = 0;
2252 enum i915_pipe pipe;
2253 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2254
2255 if (!intel_irqs_enabled(dev_priv))
2256 return IRQ_NONE;
2257
2258 if (INTEL_INFO(dev_priv)->gen >= 9)
2259 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2260 GEN9_AUX_CHANNEL_D;
2261
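	/*
	 * GEN8_MASTER_IRQ is accessed with the raw _FW variants: the usual
	 * rationale is that this register does not need forcewake, so skipping
	 * the forcewake dance keeps the hot interrupt path cheap.
	 */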
2262 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2263 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2264 if (!master_ctl)
2265 return IRQ_NONE;
2266
2267 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2268
2269 /* Find, clear, then process each source of interrupt */
2270
2271 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2272
2273 if (master_ctl & GEN8_DE_MISC_IRQ) {
2274 tmp = I915_READ(GEN8_DE_MISC_IIR);
2275 if (tmp) {
2276 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2277 ret = IRQ_HANDLED;
2278 if (tmp & GEN8_DE_MISC_GSE)
2279 intel_opregion_asle_intr(dev);
2280 else
2281 DRM_ERROR("Unexpected DE Misc interrupt\n");
2282 }
2283 else
2284 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2285 }
2286
2287 if (master_ctl & GEN8_DE_PORT_IRQ) {
2288 tmp = I915_READ(GEN8_DE_PORT_IIR);
2289 if (tmp) {
2290 bool found = false;
2291 u32 hotplug_trigger = 0;
2292
2293 if (IS_BROXTON(dev_priv))
2294 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2295 else if (IS_BROADWELL(dev_priv))
2296 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2297
2298 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2299 ret = IRQ_HANDLED;
2300
2301 if (tmp & aux_mask) {
2302 dp_aux_irq_handler(dev);
2303 found = true;
2304 }
2305
2306 if (hotplug_trigger) {
2307 if (IS_BROXTON(dev))
2308 bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2309 else
2310 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2311 found = true;
2312 }
2313
2314 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2315 gmbus_irq_handler(dev);
2316 found = true;
2317 }
2318
2319 if (!found)
2320 DRM_ERROR("Unexpected DE Port interrupt\n");
2321 }
2322 else
2323 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2324 }
2325
2326 for_each_pipe(dev_priv, pipe) {
2327 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2328
2329 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2330 continue;
2331
2332 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2333 if (pipe_iir) {
2334 ret = IRQ_HANDLED;
2335 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2336
2337 if (pipe_iir & GEN8_PIPE_VBLANK &&
2338 intel_pipe_handle_vblank(dev, pipe))
2339 intel_check_page_flip(dev, pipe);
2340
2341 if (INTEL_INFO(dev_priv)->gen >= 9)
2342 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2343 else
2344 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2345
2346 if (flip_done) {
2347 intel_prepare_page_flip(dev, pipe);
2348 intel_finish_page_flip_plane(dev, pipe);
2349 }
2350
2351 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2352 hsw_pipe_crc_irq_handler(dev, pipe);
2353
2354 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2355 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2356 pipe);
2357
2359 if (INTEL_INFO(dev_priv)->gen >= 9)
2360 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2361 else
2362 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2363
2364 if (fault_errors)
2365 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2366 pipe_name(pipe),
2367 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2368 } else
2369 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2370 }
2371
2372 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2373 master_ctl & GEN8_DE_PCH_IRQ) {
2374 /*
2375 * FIXME(BDW): Assume for now that the new interrupt handling
2376 * scheme also closed the SDE interrupt handling race we've seen
2377 * on older pch-split platforms. But this needs testing.
2378 */
2379 u32 pch_iir = I915_READ(SDEIIR);
2380 if (pch_iir) {
2381 I915_WRITE(SDEIIR, pch_iir);
2382 ret = IRQ_HANDLED;
2383
2384 if (HAS_PCH_SPT(dev_priv))
2385 spt_irq_handler(dev, pch_iir);
2386 else
2387 cpt_irq_handler(dev, pch_iir);
2388 } else {
2389 /*
2390 * Like on previous PCH there seems to be something
2391 * fishy going on with forwarding PCH interrupts.
2392 */
2393 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2394 }
2395 }
2396
2397 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2398 POSTING_READ_FW(GEN8_MASTER_IRQ);
2399
2400 return ret;
2401 }
2402
2403 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2404 bool reset_completed)
2405 {
2406 struct intel_engine_cs *ring;
2407 int i;
2408
2409 /*
2410 * Notify all waiters for GPU completion events that reset state has
2411 * been changed, and that they need to restart their wait after
2412 * checking for potential errors (and bail out to drop locks if there is
2413 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2414 */
2415
2416 #ifdef __NetBSD__
2417 for_each_ring(ring, dev_priv, i) {
2418 spin_lock(&dev_priv->irq_lock);
2419 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
2420 spin_unlock(&dev_priv->irq_lock);
2421 }
2422
2423 spin_lock(&dev_priv->pending_flip_lock);
2424 DRM_SPIN_WAKEUP_ALL(&dev_priv->pending_flip_queue,
2425 &dev_priv->pending_flip_lock);
2426 spin_unlock(&dev_priv->pending_flip_lock);
2427
2428 if (reset_completed) {
2429 spin_lock(&dev_priv->gpu_error.reset_lock);
2430 DRM_SPIN_WAKEUP_ALL(&dev_priv->gpu_error.reset_queue,
2431 &dev_priv->gpu_error.reset_lock);
2432 spin_unlock(&dev_priv->gpu_error.reset_lock);
2433 }
2434 #else
2435 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2436 for_each_ring(ring, dev_priv, i)
2437 wake_up_all(&ring->irq_queue);
2438
2439 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2440 wake_up_all(&dev_priv->pending_flip_queue);
2441
2442 /*
2443 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2444 * reset state is cleared.
2445 */
2446 if (reset_completed)
2447 wake_up_all(&dev_priv->gpu_error.reset_queue);
2448 #endif
2449 }
2450
2451 /**
2452 * i915_reset_and_wakeup - do process context error handling work
2453 * @dev: drm device
2454 *
2455 * Fire an error uevent so userspace can see that a hang or error
2456 * was detected.
2457 */
2458 static void i915_reset_and_wakeup(struct drm_device *dev)
2459 {
2460 struct drm_i915_private *dev_priv = to_i915(dev);
2461 struct i915_gpu_error *error = &dev_priv->gpu_error;
2462 #ifndef __NetBSD__
2463 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2464 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2465 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2466 #endif
2467 int ret;
2468
2469 #ifndef __NetBSD__
2470 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2471 #endif
2472
2473 /*
2474 * Note that there's only one work item which does gpu resets, so we
2475 * need not worry about concurrent gpu resets potentially incrementing
2476 * error->reset_counter twice. We only need to take care of another
2477 * racing irq/hangcheck declaring the gpu dead for a second time. A
2478 * quick check for that is good enough: schedule_work ensures the
2479 * correct ordering between hang detection and this work item, and since
2480 * the reset in-progress bit is only ever set by code outside of this
2481 * work we don't need to worry about any other races.
2482 */
2483 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2484 DRM_DEBUG_DRIVER("resetting chip\n");
2485 #ifndef __NetBSD__
2486 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2487 reset_event);
2488 #endif
2489
2490 /*
2491 * In most cases it's guaranteed that we get here with an RPM
2492 * reference held, for example because there is a pending GPU
2493 * request that won't finish until the reset is done. This
2494 * isn't the case at least when we get here by doing a
2495 * simulated reset via debugs, so get an RPM reference.
2496 */
2497 intel_runtime_pm_get(dev_priv);
2498
2499 intel_prepare_reset(dev);
2500
2501 /*
2502 * All state reset _must_ be completed before we update the
2503 * reset counter, for otherwise waiters might miss the reset
2504 * pending state and not properly drop locks, resulting in
2505 * deadlocks with the reset work.
2506 */
2507 ret = i915_reset(dev);
2508
2509 intel_finish_reset(dev);
2510
2511 intel_runtime_pm_put(dev_priv);
2512
2513 if (ret == 0) {
2514 /*
2515 * After all the gem state is reset, increment the reset
2516 * counter and wake up everyone waiting for the reset to
2517 * complete.
2518 *
2519 * Since unlock operations are a one-sided barrier only,
2520 * we need to insert a barrier here to order any seqno
2521 * updates before the counter increment.
2523 */
2524 smp_mb__before_atomic();
2525 atomic_inc(&dev_priv->gpu_error.reset_counter);
2526
2527 #ifndef __NetBSD__
2528 kobject_uevent_env(&dev->primary->kdev->kobj,
2529 KOBJ_CHANGE, reset_done_event);
2530 #endif
2531 } else {
2532 atomic_or(I915_WEDGED, &error->reset_counter);
2533 }
2534
2535 /*
2536 * Note: The wake_up also serves as a memory barrier so that
2537 * waiters see the updated value of the reset counter atomic_t.
2538 */
2539 i915_error_wake_up(dev_priv, true);
2540 }
2541 }
2542
2543 static void i915_report_and_clear_eir(struct drm_device *dev)
2544 {
2545 struct drm_i915_private *dev_priv = dev->dev_private;
2546 uint32_t instdone[I915_NUM_INSTDONE_REG];
2547 u32 eir = I915_READ(EIR);
2548 int pipe, i;
2549
2550 if (!eir)
2551 return;
2552
2553 pr_err("render error detected, EIR: 0x%08x\n", eir);
2554
2555 i915_get_extra_instdone(dev, instdone);
2556
2557 if (IS_G4X(dev)) {
2558 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2559 u32 ipeir = I915_READ(IPEIR_I965);
2560
2561 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2562 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2563 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2564 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2565 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2566 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2567 I915_WRITE(IPEIR_I965, ipeir);
2568 POSTING_READ(IPEIR_I965);
2569 }
2570 if (eir & GM45_ERROR_PAGE_TABLE) {
2571 u32 pgtbl_err = I915_READ(PGTBL_ER);
2572 pr_err("page table error\n");
2573 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2574 I915_WRITE(PGTBL_ER, pgtbl_err);
2575 POSTING_READ(PGTBL_ER);
2576 }
2577 }
2578
2579 if (!IS_GEN2(dev)) {
2580 if (eir & I915_ERROR_PAGE_TABLE) {
2581 u32 pgtbl_err = I915_READ(PGTBL_ER);
2582 pr_err("page table error\n");
2583 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2584 I915_WRITE(PGTBL_ER, pgtbl_err);
2585 POSTING_READ(PGTBL_ER);
2586 }
2587 }
2588
2589 if (eir & I915_ERROR_MEMORY_REFRESH) {
2590 pr_err("memory refresh error:\n");
2591 for_each_pipe(dev_priv, pipe)
2592 pr_err("pipe %c stat: 0x%08x\n",
2593 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2594 /* pipestat has already been acked */
2595 }
2596 if (eir & I915_ERROR_INSTRUCTION) {
2597 pr_err("instruction error\n");
2598 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2599 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2600 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2601 if (INTEL_INFO(dev)->gen < 4) {
2602 u32 ipeir = I915_READ(IPEIR);
2603
2604 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2605 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2606 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2607 I915_WRITE(IPEIR, ipeir);
2608 POSTING_READ(IPEIR);
2609 } else {
2610 u32 ipeir = I915_READ(IPEIR_I965);
2611
2612 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2613 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2614 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2615 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2616 I915_WRITE(IPEIR_I965, ipeir);
2617 POSTING_READ(IPEIR_I965);
2618 }
2619 }
2620
2621 I915_WRITE(EIR, eir);
2622 POSTING_READ(EIR);
2623 eir = I915_READ(EIR);
2624 if (eir) {
2625 /*
2626 * some errors might have become stuck,
2627 * mask them.
2628 */
2629 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2630 I915_WRITE(EMR, I915_READ(EMR) | eir);
2631 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2632 }
2633 }
2634
2635 /**
2636 * i915_handle_error - handle a gpu error
2637 * @dev: drm device
2638 *
2639 * Do some basic checking of register state at error time and
2640 * dump it to the syslog. Also call i915_capture_error_state() to make
2641 * sure we get a record and make it available in debugfs. Fire a uevent
2642 * so userspace knows something bad happened (should trigger collection
2643 * of a ring dump etc.).
2644 */
2645 void i915_handle_error(struct drm_device *dev, bool wedged,
2646 const char *fmt, ...)
2647 {
2648 struct drm_i915_private *dev_priv = dev->dev_private;
2649 va_list args;
2650 char error_msg[80];
2651
2652 va_start(args, fmt);
2653 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2654 va_end(args);
2655
2656 i915_capture_error_state(dev, wedged, error_msg);
2657 i915_report_and_clear_eir(dev);
2658
2659 if (wedged) {
2660 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2661 &dev_priv->gpu_error.reset_counter);
2662
2663 /*
2664 * Wakeup waiting processes so that the reset function
2665 * i915_reset_and_wakeup doesn't deadlock trying to grab
2666 * various locks. By bumping the reset counter first, the woken
2667 * processes will see a reset in progress and back off,
2668 * releasing their locks and then wait for the reset completion.
2669 * We must do this for _all_ gpu waiters that might hold locks
2670 * that the reset work needs to acquire.
2671 *
2672 * Note: The wake_up serves as the required memory barrier to
2673 * ensure that the waiters see the updated value of the reset
2674 * counter atomic_t.
2675 */
2676 i915_error_wake_up(dev_priv, false);
2677 }
2678
2679 i915_reset_and_wakeup(dev);
2680
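	/*
	 * Dump the captured error state straight to the kernel log. This block
	 * appears to be a NetBSD-local addition: without a debugfs error-state
	 * node, logging is the only way to preserve the captured record.
	 */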
2681 do {
2682 struct i915_error_state_file_priv error_priv;
2683 struct drm_i915_error_state_buf error_str;
2684 int ret;
2685
2686 memset(&error_priv, 0, sizeof(error_priv));
2687
2688 ret = i915_error_state_buf_init(&error_str, dev_priv, 512*1024, 0);
2689 if (ret) {
2690 DRM_ERROR("Failed to initialize error buf: %d\n", ret);
2691 break;
2692 }
2693 error_priv.dev = dev;
2694 i915_error_state_get(dev, &error_priv);
2695
2696 ret = i915_error_state_to_str(&error_str, &error_priv);
2697 if (ret) {
2698 DRM_ERROR("Failed to format error buf: %d\n", ret);
2699 i915_error_state_put(&error_priv);
2700 }
2701
2702 error_str.buf[MIN(error_str.size - 1, error_str.bytes)] = '\0';
2703 DRM_ERROR("Error state:\n%s\n", error_str.buf);
2704
2705 i915_error_state_buf_release(&error_str);
2706 i915_error_state_put(&error_priv);
2707 } while (0);
2708 }
2709
2710 /* Called from drm generic code, passed 'crtc' which
2711 * we use as a pipe index
2712 */
2713 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2714 {
2715 struct drm_i915_private *dev_priv = dev->dev_private;
2716 unsigned long irqflags;
2717
2718 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2719 if (INTEL_INFO(dev)->gen >= 4)
2720 i915_enable_pipestat(dev_priv, pipe,
2721 PIPE_START_VBLANK_INTERRUPT_STATUS);
2722 else
2723 i915_enable_pipestat(dev_priv, pipe,
2724 PIPE_VBLANK_INTERRUPT_STATUS);
2725 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2726
2727 return 0;
2728 }
2729
2730 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2731 {
2732 struct drm_i915_private *dev_priv = dev->dev_private;
2733 unsigned long irqflags;
2734 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2735 DE_PIPE_VBLANK(pipe);
2736
2737 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2738 ironlake_enable_display_irq(dev_priv, bit);
2739 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2740
2741 return 0;
2742 }
2743
2744 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2745 {
2746 struct drm_i915_private *dev_priv = dev->dev_private;
2747 unsigned long irqflags;
2748
2749 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2750 i915_enable_pipestat(dev_priv, pipe,
2751 PIPE_START_VBLANK_INTERRUPT_STATUS);
2752 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2753
2754 return 0;
2755 }
2756
2757 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2758 {
2759 struct drm_i915_private *dev_priv = dev->dev_private;
2760 unsigned long irqflags;
2761
2762 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2763 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2764 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2765 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2766 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2767 return 0;
2768 }
2769
2770 /* Called from drm generic code, passed 'crtc' which
2771 * we use as a pipe index
2772 */
2773 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2774 {
2775 struct drm_i915_private *dev_priv = dev->dev_private;
2776 unsigned long irqflags;
2777
2778 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2779 i915_disable_pipestat(dev_priv, pipe,
2780 PIPE_VBLANK_INTERRUPT_STATUS |
2781 PIPE_START_VBLANK_INTERRUPT_STATUS);
2782 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2783 }
2784
2785 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2786 {
2787 struct drm_i915_private *dev_priv = dev->dev_private;
2788 unsigned long irqflags;
2789 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2790 DE_PIPE_VBLANK(pipe);
2791
2792 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2793 ironlake_disable_display_irq(dev_priv, bit);
2794 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2795 }
2796
2797 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2798 {
2799 struct drm_i915_private *dev_priv = dev->dev_private;
2800 unsigned long irqflags;
2801
2802 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2803 i915_disable_pipestat(dev_priv, pipe,
2804 PIPE_START_VBLANK_INTERRUPT_STATUS);
2805 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2806 }
2807
2808 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2809 {
2810 struct drm_i915_private *dev_priv = dev->dev_private;
2811 unsigned long irqflags;
2812
2813 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2814 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2815 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2816 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2817 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2818 }
2819
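/*
 * A ring is considered idle when it has no outstanding requests, or when the
 * last submitted seqno has already been completed by the hardware.
 */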
2820 static bool
2821 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2822 {
2823 return (list_empty(&ring->request_list) ||
2824 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2825 }
2826
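/*
 * Decide whether the command recorded in IPEHR is a semaphore wait: on gen8+
 * we match the MI_SEMAPHORE_WAIT opcode field, on earlier gens we match
 * MI_SEMAPHORE_MBOX with the COMPARE and REGISTER flags set, ignoring the
 * sync-target bits.
 */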
2827 static bool
2828 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2829 {
2830 if (INTEL_INFO(dev)->gen >= 8) {
2831 return (ipehr >> 23) == 0x1c;
2832 } else {
2833 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2834 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2835 MI_SEMAPHORE_REGISTER);
2836 }
2837 }
2838
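/*
 * Map a semaphore wait back to the engine expected to signal it: on gen8+ by
 * comparing the semaphore's GGTT offset against each ring's signal slots, on
 * gen6/7 by the MBOX sync bits encoded in IPEHR.
 */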
2839 static struct intel_engine_cs *
2840 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2841 {
2842 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2843 struct intel_engine_cs *signaller;
2844 int i;
2845
2846 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2847 for_each_ring(signaller, dev_priv, i) {
2848 if (ring == signaller)
2849 continue;
2850
2851 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2852 return signaller;
2853 }
2854 } else {
2855 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2856
2857 for_each_ring(signaller, dev_priv, i) {
2858 if (ring == signaller)
2859 continue;
2860
2861 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2862 return signaller;
2863 }
2864 }
2865
2866 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016"PRIx64"\n",
2867 ring->id, ipehr, offset);
2868
2869 return NULL;
2870 }
2871
2872 static struct intel_engine_cs *
2873 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2874 {
2875 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2876 u32 cmd, ipehr, head;
2877 u64 offset = 0;
2878 int i, backwards;
2879
2880 /*
2881 * This function does not support execlist mode - any attempt to
2882 * proceed further into this function will result in a kernel panic
2883 * when dereferencing ring->buffer, which is not set up in execlist
2884 * mode.
2885 *
2886 * The correct way of doing it would be to derive the currently
2887 * executing ring buffer from the current context, which is derived
2888 * from the currently running request. Unfortunately, to get the
2889 * current request we would have to grab the struct_mutex before doing
2890 * anything else, which would be ill-advised since some other thread
2891 * might have grabbed it already and managed to hang itself, causing
2892 * the hang checker to deadlock.
2893 *
2894 * Therefore, this function does not support execlist mode in its
2895 * current form. Just return NULL and move on.
2896 */
2897 if (ring->buffer == NULL)
2898 return NULL;
2899
2900 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2901 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2902 return NULL;
2903
2904 /*
2905 * HEAD is likely pointing to the dword after the actual command,
2906 * so scan backwards until we find the MBOX. But limit it to just 3
2907 * or 4 dwords depending on the semaphore wait command size.
2908 * Note that we don't care about ACTHD here since that might
2909 * point at a batch, and semaphores are always emitted into the
2910 * ringbuffer itself.
2911 */
2912 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2913 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2914
2915 for (i = backwards; i; --i) {
2916 /*
2917 * Be paranoid and presume the hw has gone off into the wild -
2918 * our ring is smaller than what the hardware (and hence
2919 * HEAD_ADDR) allows. Also handles wrap-around.
2920 */
2921 head &= ring->buffer->size - 1;
2922
2923 /* This here seems to blow up */
2924 #ifdef __NetBSD__
2925 cmd = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2926 head);
2927 #else
2928 cmd = ioread32(ring->buffer->virtual_start + head);
2929 #endif
2930 if (cmd == ipehr)
2931 break;
2932
2933 head -= 4;
2934 }
2935
2936 if (!i)
2937 return NULL;
2938
2939 #ifdef __NetBSD__
2940 *seqno = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2941 head + 4) + 1;
2942 #else
2943 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2944 #endif
2945 if (INTEL_INFO(ring->dev)->gen >= 8) {
2946 #ifdef __NetBSD__
2947 offset = bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2948 head + 12);
2949 offset <<= 32;
2950 offset |= bus_space_read_4(ring->buffer->bst, ring->buffer->bsh,
2951 head + 8);
2952 #else
2953 offset = ioread32(ring->buffer->virtual_start + head + 12);
2954 offset <<= 32;
2955 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2956 #endif
2957 }
2958 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2959 }
2960
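/*
 * Returns 1 if the seqno this ring is waiting on has already been signalled,
 * 0 if the signalling ring still has work to do, and -1 if the signaller
 * cannot be determined or looks deadlocked itself.
 */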
2961 static int semaphore_passed(struct intel_engine_cs *ring)
2962 {
2963 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2964 struct intel_engine_cs *signaller;
2965 u32 seqno;
2966
2967 ring->hangcheck.deadlock++;
2968
2969 signaller = semaphore_waits_for(ring, &seqno);
2970 if (signaller == NULL)
2971 return -1;
2972
2973 /* Prevent pathological recursion due to driver bugs */
2974 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2975 return -1;
2976
2977 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2978 return 1;
2979
2980 /* cursory check for an unkickable deadlock */
2981 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2982 semaphore_passed(signaller) < 0)
2983 return -1;
2984
2985 return 0;
2986 }
2987
2988 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2989 {
2990 struct intel_engine_cs *ring;
2991 int i;
2992
2993 for_each_ring(ring, dev_priv, i)
2994 ring->hangcheck.deadlock = 0;
2995 }
2996
2997 static enum intel_ring_hangcheck_action
2998 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2999 {
3000 struct drm_device *dev = ring->dev;
3001 struct drm_i915_private *dev_priv = dev->dev_private;
3002 u32 tmp;
3003
3004 if (acthd != ring->hangcheck.acthd) {
3005 if (acthd > ring->hangcheck.max_acthd) {
3006 ring->hangcheck.max_acthd = acthd;
3007 return HANGCHECK_ACTIVE;
3008 }
3009
3010 return HANGCHECK_ACTIVE_LOOP;
3011 }
3012
3013 if (IS_GEN2(dev))
3014 return HANGCHECK_HUNG;
3015
3016 /* Is the chip hanging on a WAIT_FOR_EVENT?
3017 * If so we can simply poke the RB_WAIT bit
3018 * and break the hang. This should work on
3019 * all but the second generation chipsets.
3020 */
3021 tmp = I915_READ_CTL(ring);
3022 if (tmp & RING_WAIT) {
3023 i915_handle_error(dev, false,
3024 "Kicking stuck wait on %s",
3025 ring->name);
3026 I915_WRITE_CTL(ring, tmp);
3027 return HANGCHECK_KICK;
3028 }
3029
3030 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3031 switch (semaphore_passed(ring)) {
3032 default:
3033 return HANGCHECK_HUNG;
3034 case 1:
3035 i915_handle_error(dev, false,
3036 "Kicking stuck semaphore on %s",
3037 ring->name);
3038 I915_WRITE_CTL(ring, tmp);
3039 return HANGCHECK_KICK;
3040 case 0:
3041 return HANGCHECK_WAIT;
3042 }
3043 }
3044
3045 return HANGCHECK_HUNG;
3046 }
3047
3048 /*
3049 * This is called when the chip hasn't reported back with completed
3050 * batchbuffers in a long time. We keep track of per-ring seqno progress and
3051 * if there is no progress, the hangcheck score for that ring is increased.
3052 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
3053 * we kick the ring. If we see no progress on three subsequent calls
3054 * we assume the chip is wedged and try to fix it by resetting the chip.
3055 */
3056 static void i915_hangcheck_elapsed(struct work_struct *work)
3057 {
3058 struct drm_i915_private *dev_priv =
3059 container_of(work, typeof(*dev_priv),
3060 gpu_error.hangcheck_work.work);
3061 struct drm_device *dev = dev_priv->dev;
3062 struct intel_engine_cs *ring;
3063 int i;
3064 int busy_count = 0, rings_hung = 0;
3065 bool stuck[I915_NUM_RINGS] = { 0 };
3066 #define BUSY 1
3067 #define KICK 5
3068 #define HUNG 20
3069
3070 if (!i915.enable_hangcheck)
3071 return;
3072
3073 for_each_ring(ring, dev_priv, i) {
3074 u64 acthd;
3075 u32 seqno;
3076 bool busy = true;
3077
3078 semaphore_clear_deadlocks(dev_priv);
3079
3080 seqno = ring->get_seqno(ring, false);
3081 acthd = intel_ring_get_active_head(ring);
3082
3083 if (ring->hangcheck.seqno == seqno) {
3084 if (ring_idle(ring, seqno)) {
3085 ring->hangcheck.action = HANGCHECK_IDLE;
3086 #ifdef __NetBSD__
3087 spin_lock(&dev_priv->irq_lock);
3088 if (DRM_SPIN_WAITERS_P(&ring->irq_queue,
3089 &dev_priv->irq_lock)) {
3090 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3091 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3092 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3093 ring->name);
3094 else
3095 DRM_INFO("Fake missed irq on %s\n",
3096 ring->name);
3097 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
3098 }
3099 ring->hangcheck.score += BUSY;
3100 } else {
3101 busy = false;
3102 }
3103 spin_unlock(&dev_priv->irq_lock);
3104 #else
3105 if (waitqueue_active(&ring->irq_queue)) {
3106 /* Issue a wake-up to catch stuck h/w. */
3107 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3108 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3109 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3110 ring->name);
3111 else
3112 DRM_INFO("Fake missed irq on %s\n",
3113 ring->name);
3114 wake_up_all(&ring->irq_queue);
3115 }
3116 /* Safeguard against driver failure */
3117 ring->hangcheck.score += BUSY;
3118 } else
3119 busy = false;
3120 #endif
3121 } else {
3122 /* We always increment the hangcheck score
3123 * if the ring is busy and still processing
3124 * the same request, so that no single request
3125 * can run indefinitely (such as a chain of
3126 * batches). The only time we do not increment
3127 * the hangcheck score on this ring is if this
3128 * ring is in a legitimate wait for another
3129 * ring. In that case the waiting ring is a
3130 * victim and we want to be sure we catch the
3131 * right culprit. Then every time we do kick
3132 * the ring, add a small increment to the
3133 * score so that we can catch a batch that is
3134 * being repeatedly kicked and so responsible
3135 * for stalling the machine.
3136 */
3137 ring->hangcheck.action = ring_stuck(ring,
3138 acthd);
3139
3140 switch (ring->hangcheck.action) {
3141 case HANGCHECK_IDLE:
3142 case HANGCHECK_WAIT:
3143 case HANGCHECK_ACTIVE:
3144 break;
3145 case HANGCHECK_ACTIVE_LOOP:
3146 ring->hangcheck.score += BUSY;
3147 break;
3148 case HANGCHECK_KICK:
3149 ring->hangcheck.score += KICK;
3150 break;
3151 case HANGCHECK_HUNG:
3152 ring->hangcheck.score += HUNG;
3153 stuck[i] = true;
3154 break;
3155 }
3156 }
3157 } else {
3158 ring->hangcheck.action = HANGCHECK_ACTIVE;
3159
3160 /* Gradually reduce the count so that we catch DoS
3161 * attempts across multiple batches.
3162 */
3163 if (ring->hangcheck.score > 0)
3164 ring->hangcheck.score--;
3165
3166 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3167 }
3168
3169 ring->hangcheck.seqno = seqno;
3170 ring->hangcheck.acthd = acthd;
3171 busy_count += busy;
3172 }
3173
3174 for_each_ring(ring, dev_priv, i) {
3175 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3176 DRM_INFO("%s on %s\n",
3177 stuck[i] ? "stuck" : "no progress",
3178 ring->name);
3179 rings_hung++;
3180 }
3181 }
3182
3183 if (rings_hung) {
3184 i915_handle_error(dev, true, "Ring hung");
3185 return;
3186 }
3187
3188 if (busy_count)
3189 /* Reset the timer in case the chip hangs without another request
3190 * being added */
3191 i915_queue_hangcheck(dev);
3192 }
3193
3194 void i915_queue_hangcheck(struct drm_device *dev)
3195 {
3196 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3197
3198 if (!i915.enable_hangcheck)
3199 return;
3200
3201 /* Don't continually defer the hangcheck so that it is always run at
3202 * least once after work has been scheduled on any ring. Otherwise,
3203 * we will ignore a hung ring if a second ring is kept busy.
3204 */
3205
3206 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3207 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3208 }
3209
3210 static void ibx_irq_reset(struct drm_device *dev)
3211 {
3212 struct drm_i915_private *dev_priv = dev->dev_private;
3213
3214 if (HAS_PCH_NOP(dev))
3215 return;
3216
3217 GEN5_IRQ_RESET(SDE);
3218
3219 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3220 I915_WRITE(SERR_INT, 0xffffffff);
3221 }
3222
3223 /*
3224 * SDEIER is also touched by the interrupt handler to work around missed PCH
3225 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3226 * instead we unconditionally enable all PCH interrupt sources here, but then
3227 * only unmask them as needed with SDEIMR.
3228 *
3229 * This function needs to be called before interrupts are enabled.
3230 */
3231 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3232 {
3233 struct drm_i915_private *dev_priv = dev->dev_private;
3234
3235 if (HAS_PCH_NOP(dev))
3236 return;
3237
3238 WARN_ON(I915_READ(SDEIER) != 0);
3239 I915_WRITE(SDEIER, 0xffffffff);
3240 POSTING_READ(SDEIER);
3241 }
3242
3243 static void gen5_gt_irq_reset(struct drm_device *dev)
3244 {
3245 struct drm_i915_private *dev_priv = dev->dev_private;
3246
3247 GEN5_IRQ_RESET(GT);
3248 if (INTEL_INFO(dev)->gen >= 6)
3249 GEN5_IRQ_RESET(GEN6_PM);
3250 }
3251
3252 /* drm_dma.h hooks
3253 */
3254 static void ironlake_irq_reset(struct drm_device *dev)
3255 {
3256 struct drm_i915_private *dev_priv = dev->dev_private;
3257
3258 I915_WRITE(HWSTAM, 0xffffffff);
3259
3260 GEN5_IRQ_RESET(DE);
3261 if (IS_GEN7(dev))
3262 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3263
3264 gen5_gt_irq_reset(dev);
3265
3266 ibx_irq_reset(dev);
3267 }
3268
3269 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3270 {
3271 enum i915_pipe pipe;
3272
3273 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3274 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3275
3276 for_each_pipe(dev_priv, pipe)
3277 I915_WRITE(PIPESTAT(pipe), 0xffff);
3278
3279 GEN5_IRQ_RESET(VLV_);
3280 }
3281
3282 static void valleyview_irq_preinstall(struct drm_device *dev)
3283 {
3284 struct drm_i915_private *dev_priv = dev->dev_private;
3285
3286 /* VLV magic */
3287 I915_WRITE(VLV_IMR, 0);
3288 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3289 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3290 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3291
3292 gen5_gt_irq_reset(dev);
3293
3294 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3295
3296 vlv_display_irq_reset(dev_priv);
3297 }
3298
3299 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3300 {
3301 GEN8_IRQ_RESET_NDX(GT, 0);
3302 GEN8_IRQ_RESET_NDX(GT, 1);
3303 GEN8_IRQ_RESET_NDX(GT, 2);
3304 GEN8_IRQ_RESET_NDX(GT, 3);
3305 }
3306
3307 static void gen8_irq_reset(struct drm_device *dev)
3308 {
3309 struct drm_i915_private *dev_priv = dev->dev_private;
3310 int pipe;
3311
3312 I915_WRITE(GEN8_MASTER_IRQ, 0);
3313 POSTING_READ(GEN8_MASTER_IRQ);
3314
3315 gen8_gt_irq_reset(dev_priv);
3316
3317 for_each_pipe(dev_priv, pipe)
3318 if (intel_display_power_is_enabled(dev_priv,
3319 POWER_DOMAIN_PIPE(pipe)))
3320 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3321
3322 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3323 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3324 GEN5_IRQ_RESET(GEN8_PCU_);
3325
3326 if (HAS_PCH_SPLIT(dev))
3327 ibx_irq_reset(dev);
3328 }
3329
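/*
 * Reprogram the DE pipe interrupt registers for the pipes named in pipe_mask
 * after their display power well has been re-enabled, since the IMR/IER state
 * is not retained while the well is off.
 */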
3330 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3331 unsigned int pipe_mask)
3332 {
3333 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3334
3335 spin_lock_irq(&dev_priv->irq_lock);
3336 if (pipe_mask & 1 << PIPE_A)
3337 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3338 dev_priv->de_irq_mask[PIPE_A],
3339 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3340 if (pipe_mask & 1 << PIPE_B)
3341 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3342 dev_priv->de_irq_mask[PIPE_B],
3343 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3344 if (pipe_mask & 1 << PIPE_C)
3345 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3346 dev_priv->de_irq_mask[PIPE_C],
3347 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3348 spin_unlock_irq(&dev_priv->irq_lock);
3349 }
3350
3351 static void cherryview_irq_preinstall(struct drm_device *dev)
3352 {
3353 struct drm_i915_private *dev_priv = dev->dev_private;
3354
3355 I915_WRITE(GEN8_MASTER_IRQ, 0);
3356 POSTING_READ(GEN8_MASTER_IRQ);
3357
3358 gen8_gt_irq_reset(dev_priv);
3359
3360 GEN5_IRQ_RESET(GEN8_PCU_);
3361
3362 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3363
3364 vlv_display_irq_reset(dev_priv);
3365 }
3366
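/*
 * Build the mask of hotplug interrupt bits to enable by walking all encoders
 * and OR-ing in the bit for each hotplug pin whose state is HPD_ENABLED.
 */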
3367 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3368 const u32 hpd[HPD_NUM_PINS])
3369 {
3370 struct drm_i915_private *dev_priv = to_i915(dev);
3371 struct intel_encoder *encoder;
3372 u32 enabled_irqs = 0;
3373
3374 for_each_intel_encoder(dev, encoder)
3375 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3376 enabled_irqs |= hpd[encoder->hpd_pin];
3377
3378 return enabled_irqs;
3379 }
3380
3381 static void ibx_hpd_irq_setup(struct drm_device *dev)
3382 {
3383 struct drm_i915_private *dev_priv = dev->dev_private;
3384 u32 hotplug_irqs, hotplug, enabled_irqs;
3385
3386 if (HAS_PCH_IBX(dev)) {
3387 hotplug_irqs = SDE_HOTPLUG_MASK;
3388 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3389 } else {
3390 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3391 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3392 }
3393
3394 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3395
3396 /*
3397 * Enable digital hotplug on the PCH, and configure the DP short pulse
3398 * duration to 2ms (which is the minimum in the Display Port spec).
3399 * The pulse duration bits are reserved on LPT+.
3400 */
3401 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3402 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3403 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3404 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3405 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3406 /*
3407 * When CPU and PCH are on the same package, port A
3408 * HPD must be enabled in both north and south.
3409 */
3410 if (HAS_PCH_LPT_LP(dev))
3411 hotplug |= PORTA_HOTPLUG_ENABLE;
3412 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3413 }
3414
3415 static void spt_hpd_irq_setup(struct drm_device *dev)
3416 {
3417 struct drm_i915_private *dev_priv = dev->dev_private;
3418 u32 hotplug_irqs, hotplug, enabled_irqs;
3419
3420 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3421 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3422
3423 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3424
3425 /* Enable digital hotplug on the PCH */
3426 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3427 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3428 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3429 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3430
3431 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3432 hotplug |= PORTE_HOTPLUG_ENABLE;
3433 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3434 }
3435
3436 static void ilk_hpd_irq_setup(struct drm_device *dev)
3437 {
3438 struct drm_i915_private *dev_priv = dev->dev_private;
3439 u32 hotplug_irqs, hotplug, enabled_irqs;
3440
3441 if (INTEL_INFO(dev)->gen >= 8) {
3442 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3443 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3444
3445 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3446 } else if (INTEL_INFO(dev)->gen >= 7) {
3447 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3448 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3449
3450 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3451 } else {
3452 hotplug_irqs = DE_DP_A_HOTPLUG;
3453 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3454
3455 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3456 }
3457
3458 /*
3459 * Enable digital hotplug on the CPU, and configure the DP short pulse
3460 * duration to 2ms (which is the minimum in the Display Port spec)
3461 * The pulse duration bits are reserved on HSW+.
3462 */
3463 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3464 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3465 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3466 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3467
3468 ibx_hpd_irq_setup(dev);
3469 }
3470
3471 static void bxt_hpd_irq_setup(struct drm_device *dev)
3472 {
3473 struct drm_i915_private *dev_priv = dev->dev_private;
3474 u32 hotplug_irqs, hotplug, enabled_irqs;
3475
3476 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3477 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3478
3479 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3480
3481 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3482 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3483 PORTA_HOTPLUG_ENABLE;
3484 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3485 }
3486
3487 static void ibx_irq_postinstall(struct drm_device *dev)
3488 {
3489 struct drm_i915_private *dev_priv = dev->dev_private;
3490 u32 mask;
3491
3492 if (HAS_PCH_NOP(dev))
3493 return;
3494
3495 if (HAS_PCH_IBX(dev))
3496 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3497 else
3498 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3499
3500 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3501 I915_WRITE(SDEIMR, ~mask);
3502 }
3503
3504 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3505 {
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3507 u32 pm_irqs, gt_irqs;
3508
3509 pm_irqs = gt_irqs = 0;
3510
3511 dev_priv->gt_irq_mask = ~0;
3512 if (HAS_L3_DPF(dev)) {
3513 /* L3 parity interrupt is always unmasked. */
3514 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3515 gt_irqs |= GT_PARITY_ERROR(dev);
3516 }
3517
3518 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3519 if (IS_GEN5(dev)) {
3520 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3521 ILK_BSD_USER_INTERRUPT;
3522 } else {
3523 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3524 }
3525
3526 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3527
3528 if (INTEL_INFO(dev)->gen >= 6) {
3529 /*
3530 * RPS interrupts will get enabled/disabled on demand when RPS
3531 * itself is enabled/disabled.
3532 */
3533 if (HAS_VEBOX(dev))
3534 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3535
3536 dev_priv->pm_irq_mask = 0xffffffff;
3537 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3538 }
3539 }
3540
3541 static int ironlake_irq_postinstall(struct drm_device *dev)
3542 {
3543 struct drm_i915_private *dev_priv = dev->dev_private;
3544 u32 display_mask, extra_mask;
3545
3546 if (INTEL_INFO(dev)->gen >= 7) {
3547 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3548 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3549 DE_PLANEB_FLIP_DONE_IVB |
3550 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3551 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3552 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3553 DE_DP_A_HOTPLUG_IVB);
3554 } else {
3555 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3556 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3557 DE_AUX_CHANNEL_A |
3558 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3559 DE_POISON);
3560 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3561 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3562 DE_DP_A_HOTPLUG);
3563 }
3564
3565 dev_priv->irq_mask = ~display_mask;
3566
3567 I915_WRITE(HWSTAM, 0xeffe);
3568
3569 ibx_irq_pre_postinstall(dev);
3570
3571 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3572
3573 gen5_gt_irq_postinstall(dev);
3574
3575 ibx_irq_postinstall(dev);
3576
3577 if (IS_IRONLAKE_M(dev)) {
3578 /* Enable PCU event interrupts
3579 *
3580 * spinlocking not required here for correctness since interrupt
3581 * setup is guaranteed to run in single-threaded context. But we
3582 * need it to make the assert_spin_locked happy. */
3583 spin_lock_irq(&dev_priv->irq_lock);
3584 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3585 spin_unlock_irq(&dev_priv->irq_lock);
3586 }
3587
3588 return 0;
3589 }
3590
3591 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3592 {
3593 u32 pipestat_mask;
3594 u32 iir_mask;
3595 enum i915_pipe pipe;
3596
3597 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3598 PIPE_FIFO_UNDERRUN_STATUS;
3599
3600 for_each_pipe(dev_priv, pipe)
3601 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3602 POSTING_READ(PIPESTAT(PIPE_A));
3603
3604 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3605 PIPE_CRC_DONE_INTERRUPT_STATUS;
3606
3607 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3608 for_each_pipe(dev_priv, pipe)
3609 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3610
3611 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3612 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3613 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3614 if (IS_CHERRYVIEW(dev_priv))
3615 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3616 dev_priv->irq_mask &= ~iir_mask;
3617
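	/*
	 * VLV_IIR is written twice here (and in the uninstall/postinstall
	 * paths), presumably to make sure latched/double-buffered status bits
	 * are fully cleared before the interrupts are unmasked.
	 */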
3618 I915_WRITE(VLV_IIR, iir_mask);
3619 I915_WRITE(VLV_IIR, iir_mask);
3620 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3621 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3622 POSTING_READ(VLV_IMR);
3623 }
3624
3625 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3626 {
3627 u32 pipestat_mask;
3628 u32 iir_mask;
3629 enum i915_pipe pipe;
3630
3631 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3632 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3633 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3634 if (IS_CHERRYVIEW(dev_priv))
3635 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3636
3637 dev_priv->irq_mask |= iir_mask;
3638 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3639 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3640 I915_WRITE(VLV_IIR, iir_mask);
3641 I915_WRITE(VLV_IIR, iir_mask);
3642 POSTING_READ(VLV_IIR);
3643
3644 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3645 PIPE_CRC_DONE_INTERRUPT_STATUS;
3646
3647 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3648 for_each_pipe(dev_priv, pipe)
3649 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3650
3651 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3652 PIPE_FIFO_UNDERRUN_STATUS;
3653
3654 for_each_pipe(dev_priv, pipe)
3655 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3656 POSTING_READ(PIPESTAT(PIPE_A));
3657 }
3658
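/*
 * valleyview_{enable,disable}_display_irqs() track whether the display
 * side wants its interrupts on; the caller must hold irq_lock, and the
 * hardware is only touched while the device interrupt is installed.
 */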
3659 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3660 {
3661 assert_spin_locked(&dev_priv->irq_lock);
3662
3663 if (dev_priv->display_irqs_enabled)
3664 return;
3665
3666 dev_priv->display_irqs_enabled = true;
3667
3668 if (intel_irqs_enabled(dev_priv))
3669 valleyview_display_irqs_install(dev_priv);
3670 }
3671
3672 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3673 {
3674 assert_spin_locked(&dev_priv->irq_lock);
3675
3676 if (!dev_priv->display_irqs_enabled)
3677 return;
3678
3679 dev_priv->display_irqs_enabled = false;
3680
3681 if (intel_irqs_enabled(dev_priv))
3682 valleyview_display_irqs_uninstall(dev_priv);
3683 }
3684
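/*
 * Baseline VLV/CHV display setup for postinstall: mask everything,
 * disable the hotplug enables, clear any pending VLV_IIR bits, then
 * install the display irqs if they have already been requested.
 */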
3685 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3686 {
3687 dev_priv->irq_mask = ~0;
3688
3689 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3690 POSTING_READ(PORT_HOTPLUG_EN);
3691
3692 I915_WRITE(VLV_IIR, 0xffffffff);
3693 I915_WRITE(VLV_IIR, 0xffffffff);
3694 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3695 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3696 POSTING_READ(VLV_IMR);
3697
3698 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3699 * just to make the assert_spin_locked check happy. */
3700 spin_lock_irq(&dev_priv->irq_lock);
3701 if (dev_priv->display_irqs_enabled)
3702 valleyview_display_irqs_install(dev_priv);
3703 spin_unlock_irq(&dev_priv->irq_lock);
3704 }
3705
3706 static int valleyview_irq_postinstall(struct drm_device *dev)
3707 {
3708 struct drm_i915_private *dev_priv = dev->dev_private;
3709
3710 vlv_display_irq_postinstall(dev_priv);
3711
3712 gen5_gt_irq_postinstall(dev);
3713
3714 /* ack & enable invalid PTE error interrupts */
3715 #if 0 /* FIXME: add support to irq handler for checking these bits */
3716 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3717 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3718 #endif
3719
3720 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3721
3722 return 0;
3723 }
3724
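/*
 * Program the four GEN8 GT interrupt banks: render/blitter (bank 0),
 * the video decode rings (bank 1), PM/RPS (bank 2, left masked until
 * RPS enables them on demand) and VECS (bank 3).
 */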
3725 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3726 {
3727 /* These are interrupts we'll toggle with the ring mask register */
3728 uint32_t gt_interrupts[] = {
3729 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3730 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3731 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3732 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3733 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3734 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3735 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3736 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3737 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3738 0,
3739 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3740 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3741 };
3742
3743 dev_priv->pm_irq_mask = 0xffffffff;
3744 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3745 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3746 /*
3747 * RPS interrupts will get enabled/disabled on demand when RPS itself
3748 * is enabled/disabled.
3749 */
3750 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3751 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3752 }
3753
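/*
 * Program the GEN8+ display engine interrupts: per-pipe flip-done, CRC
 * and fault bits (with the GEN9 variants where applicable) plus vblank
 * and FIFO underrun enables, and the DE port AUX/hotplug/GMBUS sources.
 * Pipes whose power domain is currently off are skipped.
 */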
3754 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3755 {
3756 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3757 uint32_t de_pipe_enables;
3758 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3759 u32 de_port_enables;
3760 enum i915_pipe pipe;
3761
3762 if (INTEL_INFO(dev_priv)->gen >= 9) {
3763 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3764 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3765 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3766 GEN9_AUX_CHANNEL_D;
3767 if (IS_BROXTON(dev_priv))
3768 de_port_masked |= BXT_DE_PORT_GMBUS;
3769 } else {
3770 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3771 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3772 }
3773
3774 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3775 GEN8_PIPE_FIFO_UNDERRUN;
3776
3777 de_port_enables = de_port_masked;
3778 if (IS_BROXTON(dev_priv))
3779 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3780 else if (IS_BROADWELL(dev_priv))
3781 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3782
3783 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3784 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3785 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3786
3787 for_each_pipe(dev_priv, pipe)
3788 if (intel_display_power_is_enabled(dev_priv,
3789 POWER_DOMAIN_PIPE(pipe)))
3790 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3791 dev_priv->de_irq_mask[pipe],
3792 de_pipe_enables);
3793
3794 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3795 }
3796
3797 static int gen8_irq_postinstall(struct drm_device *dev)
3798 {
3799 struct drm_i915_private *dev_priv = dev->dev_private;
3800
3801 if (HAS_PCH_SPLIT(dev))
3802 ibx_irq_pre_postinstall(dev);
3803
3804 gen8_gt_irq_postinstall(dev_priv);
3805 gen8_de_irq_postinstall(dev_priv);
3806
3807 if (HAS_PCH_SPLIT(dev))
3808 ibx_irq_postinstall(dev);
3809
3810 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3811 POSTING_READ(GEN8_MASTER_IRQ);
3812
3813 return 0;
3814 }
3815
3816 static int cherryview_irq_postinstall(struct drm_device *dev)
3817 {
3818 struct drm_i915_private *dev_priv = dev->dev_private;
3819
3820 vlv_display_irq_postinstall(dev_priv);
3821
3822 gen8_gt_irq_postinstall(dev_priv);
3823
3824 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3825 POSTING_READ(GEN8_MASTER_IRQ);
3826
3827 return 0;
3828 }
3829
3830 static void gen8_irq_uninstall(struct drm_device *dev)
3831 {
3832 struct drm_i915_private *dev_priv = dev->dev_private;
3833
3834 if (!dev_priv)
3835 return;
3836
3837 gen8_irq_reset(dev);
3838 }
3839
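/*
 * VLV/CHV display teardown: uninstall the display irqs if they are
 * active, reset the display interrupt registers and remask everything
 * in the software irq_mask.
 */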
3840 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3841 {
3842 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3843 * just to make the assert_spin_locked check happy. */
3844 spin_lock_irq(&dev_priv->irq_lock);
3845 if (dev_priv->display_irqs_enabled)
3846 valleyview_display_irqs_uninstall(dev_priv);
3847 spin_unlock_irq(&dev_priv->irq_lock);
3848
3849 vlv_display_irq_reset(dev_priv);
3850
3851 dev_priv->irq_mask = ~0;
3852 }
3853
3854 static void valleyview_irq_uninstall(struct drm_device *dev)
3855 {
3856 struct drm_i915_private *dev_priv = dev->dev_private;
3857
3858 if (!dev_priv)
3859 return;
3860
3861 I915_WRITE(VLV_MASTER_IER, 0);
3862
3863 gen5_gt_irq_reset(dev);
3864
3865 I915_WRITE(HWSTAM, 0xffffffff);
3866
3867 vlv_display_irq_uninstall(dev_priv);
3868 }
3869
3870 static void cherryview_irq_uninstall(struct drm_device *dev)
3871 {
3872 struct drm_i915_private *dev_priv = dev->dev_private;
3873
3874 if (!dev_priv)
3875 return;
3876
3877 I915_WRITE(GEN8_MASTER_IRQ, 0);
3878 POSTING_READ(GEN8_MASTER_IRQ);
3879
3880 gen8_gt_irq_reset(dev_priv);
3881
3882 GEN5_IRQ_RESET(GEN8_PCU_);
3883
3884 vlv_display_irq_uninstall(dev_priv);
3885 }
3886
3887 static void ironlake_irq_uninstall(struct drm_device *dev)
3888 {
3889 struct drm_i915_private *dev_priv = dev->dev_private;
3890
3891 if (!dev_priv)
3892 return;
3893
3894 ironlake_irq_reset(dev);
3895 }
3896
3897 static void i8xx_irq_preinstall(struct drm_device * dev)
3898 {
3899 struct drm_i915_private *dev_priv = dev->dev_private;
3900 int pipe;
3901
3902 for_each_pipe(dev_priv, pipe)
3903 I915_WRITE(PIPESTAT(pipe), 0);
3904 I915_WRITE16(IMR, 0xffff);
3905 I915_WRITE16(IER, 0x0);
3906 POSTING_READ16(IER);
3907 }
3908
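/*
 * Gen2 (i8xx) postinstall: unmask the pipe event and user interrupts in
 * the 16-bit IMR/IER, program EMR to report only page-table and
 * memory-refresh errors, and enable the CRC-done pipestat events on
 * both pipes.
 */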
3909 static int i8xx_irq_postinstall(struct drm_device *dev)
3910 {
3911 struct drm_i915_private *dev_priv = dev->dev_private;
3912
3913 I915_WRITE16(EMR,
3914 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3915
3916 /* Unmask the interrupts that we always want on. */
3917 dev_priv->irq_mask =
3918 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3919 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3920 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3921 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3922 I915_WRITE16(IMR, dev_priv->irq_mask);
3923
3924 I915_WRITE16(IER,
3925 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3926 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3927 I915_USER_INTERRUPT);
3928 POSTING_READ16(IER);
3929
3930 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3931 * just to make the assert_spin_locked check happy. */
3932 spin_lock_irq(&dev_priv->irq_lock);
3933 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3934 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3935 spin_unlock_irq(&dev_priv->irq_lock);
3936
3937 return 0;
3938 }
3939
3940 /*
3941 * Returns true when a page flip has completed.
3942 */
3943 static bool i8xx_handle_vblank(struct drm_device *dev,
3944 int plane, int pipe, u32 iir)
3945 {
3946 struct drm_i915_private *dev_priv = dev->dev_private;
3947 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3948
3949 if (!intel_pipe_handle_vblank(dev, pipe))
3950 return false;
3951
3952 if ((iir & flip_pending) == 0)
3953 goto check_page_flip;
3954
3955 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3956 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3957 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3958 * the flip is completed (no longer pending). Since this doesn't raise
3959 * an interrupt per se, we watch for the change at vblank.
3960 */
3961 if (I915_READ16(ISR) & flip_pending)
3962 goto check_page_flip;
3963
3964 intel_prepare_page_flip(dev, plane);
3965 intel_finish_page_flip(dev, pipe);
3966 return true;
3967
3968 check_page_flip:
3969 intel_check_page_flip(dev, pipe);
3970 return false;
3971 }
3972
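/*
 * Gen2 interrupt handler.  Loops on the 16-bit IIR: PIPESTAT is
 * snapshotted and cleared under the irq lock before IIR is acked, then
 * user interrupts, vblank/page-flip completion, CRC and FIFO underrun
 * events are fanned out to their handlers.
 */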
3973 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
3974 {
3975 struct drm_device *dev = arg;
3976 struct drm_i915_private *dev_priv = dev->dev_private;
3977 u16 iir, new_iir;
3978 u32 pipe_stats[2];
3979 int pipe;
3980 u16 flip_mask =
3981 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3982 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3983
3984 if (!intel_irqs_enabled(dev_priv))
3985 return IRQ_NONE;
3986
3987 iir = I915_READ16(IIR);
3988 if (iir == 0)
3989 return IRQ_NONE;
3990
3991 while (iir & ~flip_mask) {
3992 /* Can't rely on pipestat interrupt bit in iir as it might
3993 * have been cleared after the pipestat interrupt was received.
3994 * It doesn't set the bit in iir again, but it still produces
3995 * interrupts (for non-MSI).
3996 */
3997 spin_lock(&dev_priv->irq_lock);
3998 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3999 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4000
4001 for_each_pipe(dev_priv, pipe) {
4002 int reg = PIPESTAT(pipe);
4003 pipe_stats[pipe] = I915_READ(reg);
4004
4005 /*
4006 * Clear the PIPE*STAT regs before the IIR
4007 */
4008 if (pipe_stats[pipe] & 0x8000ffff)
4009 I915_WRITE(reg, pipe_stats[pipe]);
4010 }
4011 spin_unlock(&dev_priv->irq_lock);
4012
4013 I915_WRITE16(IIR, iir & ~flip_mask);
4014 new_iir = I915_READ16(IIR); /* Flush posted writes */
4015
4016 if (iir & I915_USER_INTERRUPT)
4017 notify_ring(&dev_priv->ring[RCS]);
4018
4019 for_each_pipe(dev_priv, pipe) {
4020 int plane = pipe;
4021 if (HAS_FBC(dev))
4022 plane = !plane;
4023
4024 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4025 i8xx_handle_vblank(dev, plane, pipe, iir))
4026 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4027
4028 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4029 i9xx_pipe_crc_irq_handler(dev, pipe);
4030
4031 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4032 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4033 pipe);
4034 }
4035
4036 iir = new_iir;
4037 }
4038
4039 return IRQ_HANDLED;
4040 }
4041
4042 static void i8xx_irq_uninstall(struct drm_device * dev)
4043 {
4044 struct drm_i915_private *dev_priv = dev->dev_private;
4045 int pipe;
4046
4047 for_each_pipe(dev_priv, pipe) {
4048 /* Clear enable bits; then clear status bits */
4049 I915_WRITE(PIPESTAT(pipe), 0);
4050 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4051 }
4052 I915_WRITE16(IMR, 0xffff);
4053 I915_WRITE16(IER, 0x0);
4054 I915_WRITE16(IIR, I915_READ16(IIR));
4055 }
4056
4057 static void i915_irq_preinstall(struct drm_device * dev)
4058 {
4059 struct drm_i915_private *dev_priv = dev->dev_private;
4060 int pipe;
4061
4062 if (I915_HAS_HOTPLUG(dev)) {
4063 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4064 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4065 }
4066
4067 I915_WRITE16(HWSTAM, 0xeffe);
4068 for_each_pipe(dev_priv, pipe)
4069 I915_WRITE(PIPESTAT(pipe), 0);
4070 I915_WRITE(IMR, 0xffffffff);
4071 I915_WRITE(IER, 0x0);
4072 POSTING_READ(IER);
4073 }
4074
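/*
 * Gen3 (i915) postinstall: the 32-bit equivalent of the gen2 setup,
 * with ASLE enabled and, where the platform has it, the display port
 * hotplug interrupt unmasked as well.
 */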
4075 static int i915_irq_postinstall(struct drm_device *dev)
4076 {
4077 struct drm_i915_private *dev_priv = dev->dev_private;
4078 u32 enable_mask;
4079
4080 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4081
4082 /* Unmask the interrupts that we always want on. */
4083 dev_priv->irq_mask =
4084 ~(I915_ASLE_INTERRUPT |
4085 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4086 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4087 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4088 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4089
4090 enable_mask =
4091 I915_ASLE_INTERRUPT |
4092 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4093 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4094 I915_USER_INTERRUPT;
4095
4096 if (I915_HAS_HOTPLUG(dev)) {
4097 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4098 POSTING_READ(PORT_HOTPLUG_EN);
4099
4100 /* Enable in IER... */
4101 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4102 /* and unmask in IMR */
4103 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4104 }
4105
4106 I915_WRITE(IMR, dev_priv->irq_mask);
4107 I915_WRITE(IER, enable_mask);
4108 POSTING_READ(IER);
4109
4110 i915_enable_asle_pipestat(dev);
4111
4112 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4113 * just to make the assert_spin_locked check happy. */
4114 spin_lock_irq(&dev_priv->irq_lock);
4115 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4116 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4117 spin_unlock_irq(&dev_priv->irq_lock);
4118
4119 return 0;
4120 }
4121
4122 /*
4123 * Returns true when a page flip has completed.
4124 */
4125 static bool i915_handle_vblank(struct drm_device *dev,
4126 int plane, int pipe, u32 iir)
4127 {
4128 struct drm_i915_private *dev_priv = dev->dev_private;
4129 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4130
4131 if (!intel_pipe_handle_vblank(dev, pipe))
4132 return false;
4133
4134 if ((iir & flip_pending) == 0)
4135 goto check_page_flip;
4136
4137 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4138 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4139 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4140 * the flip is completed (no longer pending). Since this doesn't raise
4141 * an interrupt per se, we watch for the change at vblank.
4142 */
4143 if (I915_READ(ISR) & flip_pending)
4144 goto check_page_flip;
4145
4146 intel_prepare_page_flip(dev, plane);
4147 intel_finish_page_flip(dev, pipe);
4148 return true;
4149
4150 check_page_flip:
4151 intel_check_page_flip(dev, pipe);
4152 return false;
4153 }
4154
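/*
 * Gen3 interrupt handler; same structure as the gen2 handler with the
 * addition of hotplug and opregion ASLE event handling.
 */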
4155 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
4156 {
4157 struct drm_device *dev = arg;
4158 struct drm_i915_private *dev_priv = dev->dev_private;
4159 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4160 u32 flip_mask =
4161 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4162 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4163 int pipe, ret = IRQ_NONE;
4164
4165 if (!intel_irqs_enabled(dev_priv))
4166 return IRQ_NONE;
4167
4168 iir = I915_READ(IIR);
4169 do {
4170 bool irq_received = (iir & ~flip_mask) != 0;
4171 bool blc_event = false;
4172
4173 /* Can't rely on pipestat interrupt bit in iir as it might
4174 * have been cleared after the pipestat interrupt was received.
4175 * It doesn't set the bit in iir again, but it still produces
4176 * interrupts (for non-MSI).
4177 */
4178 spin_lock(&dev_priv->irq_lock);
4179 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4180 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4181
4182 for_each_pipe(dev_priv, pipe) {
4183 int reg = PIPESTAT(pipe);
4184 pipe_stats[pipe] = I915_READ(reg);
4185
4186 /* Clear the PIPE*STAT regs before the IIR */
4187 if (pipe_stats[pipe] & 0x8000ffff) {
4188 I915_WRITE(reg, pipe_stats[pipe]);
4189 irq_received = true;
4190 }
4191 }
4192 spin_unlock(&dev_priv->irq_lock);
4193
4194 if (!irq_received)
4195 break;
4196
4197 /* Consume port. Then clear IIR or we'll miss events */
4198 if (I915_HAS_HOTPLUG(dev) &&
4199 iir & I915_DISPLAY_PORT_INTERRUPT)
4200 i9xx_hpd_irq_handler(dev);
4201
4202 I915_WRITE(IIR, iir & ~flip_mask);
4203 new_iir = I915_READ(IIR); /* Flush posted writes */
4204
4205 if (iir & I915_USER_INTERRUPT)
4206 notify_ring(&dev_priv->ring[RCS]);
4207
4208 for_each_pipe(dev_priv, pipe) {
4209 int plane = pipe;
4210 if (HAS_FBC(dev))
4211 plane = !plane;
4212
4213 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4214 i915_handle_vblank(dev, plane, pipe, iir))
4215 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4216
4217 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4218 blc_event = true;
4219
4220 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4221 i9xx_pipe_crc_irq_handler(dev, pipe);
4222
4223 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4224 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4225 pipe);
4226 }
4227
4228 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4229 intel_opregion_asle_intr(dev);
4230
4231 /* With MSI, interrupts are only generated when iir
4232 * transitions from zero to nonzero. If another bit got
4233 * set while we were handling the existing iir bits, then
4234 * we would never get another interrupt.
4235 *
4236 * This is fine on non-MSI as well, as if we hit this path
4237 * we avoid exiting the interrupt handler only to generate
4238 * another one.
4239 *
4240 * Note that for MSI this could cause a stray interrupt report
4241 * if an interrupt landed in the time between writing IIR and
4242 * the posting read. This should be rare enough to never
4243 * trigger the 99% of 100,000 interrupts test for disabling
4244 * stray interrupts.
4245 */
4246 ret = IRQ_HANDLED;
4247 iir = new_iir;
4248 } while (iir & ~flip_mask);
4249
4250 return ret;
4251 }
4252
4253 static void i915_irq_uninstall(struct drm_device * dev)
4254 {
4255 struct drm_i915_private *dev_priv = dev->dev_private;
4256 int pipe;
4257
4258 if (I915_HAS_HOTPLUG(dev)) {
4259 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4260 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4261 }
4262
4263 I915_WRITE16(HWSTAM, 0xffff);
4264 for_each_pipe(dev_priv, pipe) {
4265 /* Clear enable bits; then clear status bits */
4266 I915_WRITE(PIPESTAT(pipe), 0);
4267 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4268 }
4269 I915_WRITE(IMR, 0xffffffff);
4270 I915_WRITE(IER, 0x0);
4271
4272 I915_WRITE(IIR, I915_READ(IIR));
4273 }
4274
4275 static void i965_irq_preinstall(struct drm_device * dev)
4276 {
4277 struct drm_i915_private *dev_priv = dev->dev_private;
4278 int pipe;
4279
4280 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4281 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4282
4283 I915_WRITE(HWSTAM, 0xeffe);
4284 for_each_pipe(dev_priv, pipe)
4285 I915_WRITE(PIPESTAT(pipe), 0);
4286 I915_WRITE(IMR, 0xffffffff);
4287 I915_WRITE(IER, 0x0);
4288 POSTING_READ(IER);
4289 }
4290
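/*
 * Gen4 (i965/G4X) postinstall: unmask the display port, pipe event and
 * render user interrupts (plus the BSD ring on G4X), enable the GMBUS
 * and CRC-done pipestat events, and program EMR so that page-table,
 * memory-refresh and (on G4X) privilege errors are reported.
 */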
4291 static int i965_irq_postinstall(struct drm_device *dev)
4292 {
4293 struct drm_i915_private *dev_priv = dev->dev_private;
4294 u32 enable_mask;
4295 u32 error_mask;
4296
4297 /* Unmask the interrupts that we always want on. */
4298 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4299 I915_DISPLAY_PORT_INTERRUPT |
4300 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4301 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4302 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4303 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4304 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4305
4306 enable_mask = ~dev_priv->irq_mask;
4307 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4308 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4309 enable_mask |= I915_USER_INTERRUPT;
4310
4311 if (IS_G4X(dev))
4312 enable_mask |= I915_BSD_USER_INTERRUPT;
4313
4314 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4315 * just to make the assert_spin_locked check happy. */
4316 spin_lock_irq(&dev_priv->irq_lock);
4317 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4318 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4319 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4320 spin_unlock_irq(&dev_priv->irq_lock);
4321
4322 /*
4323 * Enable some error detection, note the instruction error mask
4324 * bit is reserved, so we leave it masked.
4325 */
4326 if (IS_G4X(dev)) {
4327 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4328 GM45_ERROR_MEM_PRIV |
4329 GM45_ERROR_CP_PRIV |
4330 I915_ERROR_MEMORY_REFRESH);
4331 } else {
4332 error_mask = ~(I915_ERROR_PAGE_TABLE |
4333 I915_ERROR_MEMORY_REFRESH);
4334 }
4335 I915_WRITE(EMR, error_mask);
4336
4337 I915_WRITE(IMR, dev_priv->irq_mask);
4338 I915_WRITE(IER, enable_mask);
4339 POSTING_READ(IER);
4340
4341 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4342 POSTING_READ(PORT_HOTPLUG_EN);
4343
4344 i915_enable_asle_pipestat(dev);
4345
4346 return 0;
4347 }
4348
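/*
 * Program PORT_HOTPLUG_EN from the currently enabled HPD pins; also
 * sets the CRT detection voltage/activation parameters.  Must be
 * called with irq_lock held.
 */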
4349 static void i915_hpd_irq_setup(struct drm_device *dev)
4350 {
4351 struct drm_i915_private *dev_priv = dev->dev_private;
4352 u32 hotplug_en;
4353
4354 assert_spin_locked(&dev_priv->irq_lock);
4355
4356 /* Note HDMI and DP share hotplug bits */
4357 /* enable bits are the same for all generations */
4358 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4359 /* Programming the CRT detection parameters tends
4360 to generate a spurious hotplug event about three
4361 seconds later. So just do it once.
4362 */
4363 if (IS_G4X(dev))
4364 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4365 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4366
4367 /* Ignore TV since it's buggy */
4368 i915_hotplug_interrupt_update_locked(dev_priv,
4369 HOTPLUG_INT_EN_MASK |
4370 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4371 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4372 hotplug_en);
4373 }
4374
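/*
 * Gen4 interrupt handler; extends the gen3 flow with BSD ring
 * notification and GMBUS event handling.
 */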
4375 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
4376 {
4377 struct drm_device *dev = arg;
4378 struct drm_i915_private *dev_priv = dev->dev_private;
4379 u32 iir, new_iir;
4380 u32 pipe_stats[I915_MAX_PIPES];
4381 int ret = IRQ_NONE, pipe;
4382 u32 flip_mask =
4383 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4384 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4385
4386 if (!intel_irqs_enabled(dev_priv))
4387 return IRQ_NONE;
4388
4389 iir = I915_READ(IIR);
4390
4391 for (;;) {
4392 bool irq_received = (iir & ~flip_mask) != 0;
4393 bool blc_event = false;
4394
4395 /* Can't rely on pipestat interrupt bit in iir as it might
4396 * have been cleared after the pipestat interrupt was received.
4397 * It doesn't set the bit in iir again, but it still produces
4398 * interrupts (for non-MSI).
4399 */
4400 spin_lock(&dev_priv->irq_lock);
4401 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4402 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4403
4404 for_each_pipe(dev_priv, pipe) {
4405 int reg = PIPESTAT(pipe);
4406 pipe_stats[pipe] = I915_READ(reg);
4407
4408 /*
4409 * Clear the PIPE*STAT regs before the IIR
4410 */
4411 if (pipe_stats[pipe] & 0x8000ffff) {
4412 I915_WRITE(reg, pipe_stats[pipe]);
4413 irq_received = true;
4414 }
4415 }
4416 spin_unlock(&dev_priv->irq_lock);
4417
4418 if (!irq_received)
4419 break;
4420
4421 ret = IRQ_HANDLED;
4422
4423 /* Consume port. Then clear IIR or we'll miss events */
4424 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4425 i9xx_hpd_irq_handler(dev);
4426
4427 I915_WRITE(IIR, iir & ~flip_mask);
4428 new_iir = I915_READ(IIR); /* Flush posted writes */
4429
4430 if (iir & I915_USER_INTERRUPT)
4431 notify_ring(&dev_priv->ring[RCS]);
4432 if (iir & I915_BSD_USER_INTERRUPT)
4433 notify_ring(&dev_priv->ring[VCS]);
4434
4435 for_each_pipe(dev_priv, pipe) {
4436 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4437 i915_handle_vblank(dev, pipe, pipe, iir))
4438 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4439
4440 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4441 blc_event = true;
4442
4443 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4444 i9xx_pipe_crc_irq_handler(dev, pipe);
4445
4446 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4447 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4448 }
4449
4450 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4451 intel_opregion_asle_intr(dev);
4452
4453 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4454 gmbus_irq_handler(dev);
4455
4456 /* With MSI, interrupts are only generated when iir
4457 * transitions from zero to nonzero. If another bit got
4458 * set while we were handling the existing iir bits, then
4459 * we would never get another interrupt.
4460 *
4461 * This is fine on non-MSI as well, as if we hit this path
4462 * we avoid exiting the interrupt handler only to generate
4463 * another one.
4464 *
4465 * Note that for MSI this could cause a stray interrupt report
4466 * if an interrupt landed in the time between writing IIR and
4467 * the posting read. This should be rare enough to never
4468 * trigger the 99% of 100,000 interrupts test for disabling
4469 * stray interrupts.
4470 */
4471 iir = new_iir;
4472 }
4473
4474 return ret;
4475 }
4476
4477 static void i965_irq_uninstall(struct drm_device * dev)
4478 {
4479 struct drm_i915_private *dev_priv = dev->dev_private;
4480 int pipe;
4481
4482 if (!dev_priv)
4483 return;
4484
4485 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4486 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4487
4488 I915_WRITE(HWSTAM, 0xffffffff);
4489 for_each_pipe(dev_priv, pipe)
4490 I915_WRITE(PIPESTAT(pipe), 0);
4491 I915_WRITE(IMR, 0xffffffff);
4492 I915_WRITE(IER, 0x0);
4493
4494 for_each_pipe(dev_priv, pipe)
4495 I915_WRITE(PIPESTAT(pipe),
4496 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4497 I915_WRITE(IIR, I915_READ(IIR));
4498 }
4499
4500 /**
4501 * intel_irq_init - initializes irq support
4502 * @dev_priv: i915 device instance
4503 *
4504 * This function initializes all the irq support including work items, timers
4505  * and all the vtables. It does not set up the interrupt itself though.
4506 */
4507 void intel_irq_init(struct drm_i915_private *dev_priv)
4508 {
4509 struct drm_device *dev = dev_priv->dev;
4510
4511 intel_hpd_init_work(dev_priv);
4512
4513 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4514 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4515
4516 /* Let's track the enabled rps events */
4517 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4518 /* WaGsvRC0ResidencyMethod:vlv */
4519 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4520 else
4521 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4522
4523 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4524 i915_hangcheck_elapsed);
4525
4526 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4527
4528 if (IS_GEN2(dev_priv)) {
4529 dev->max_vblank_count = 0;
4530 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4531 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4532 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4533 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4534 } else {
4535 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4536 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4537 }
4538
4539 /*
4540 * Opt out of the vblank disable timer on everything except gen2.
4541 * Gen2 doesn't have a hardware frame counter and so depends on
4542 	 * vblank interrupts to produce sane vblank sequence numbers.
4543 */
4544 if (!IS_GEN2(dev_priv))
4545 dev->vblank_disable_immediate = true;
4546
4547 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4548 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4549
4550 if (IS_CHERRYVIEW(dev_priv)) {
4551 dev->driver->irq_handler = cherryview_irq_handler;
4552 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4553 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4554 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4555 dev->driver->enable_vblank = valleyview_enable_vblank;
4556 dev->driver->disable_vblank = valleyview_disable_vblank;
4557 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4558 } else if (IS_VALLEYVIEW(dev_priv)) {
4559 dev->driver->irq_handler = valleyview_irq_handler;
4560 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4561 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4562 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4563 dev->driver->enable_vblank = valleyview_enable_vblank;
4564 dev->driver->disable_vblank = valleyview_disable_vblank;
4565 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4566 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4567 dev->driver->irq_handler = gen8_irq_handler;
4568 dev->driver->irq_preinstall = gen8_irq_reset;
4569 dev->driver->irq_postinstall = gen8_irq_postinstall;
4570 dev->driver->irq_uninstall = gen8_irq_uninstall;
4571 dev->driver->enable_vblank = gen8_enable_vblank;
4572 dev->driver->disable_vblank = gen8_disable_vblank;
4573 if (IS_BROXTON(dev))
4574 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4575 else if (HAS_PCH_SPT(dev))
4576 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4577 else
4578 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4579 } else if (HAS_PCH_SPLIT(dev)) {
4580 dev->driver->irq_handler = ironlake_irq_handler;
4581 dev->driver->irq_preinstall = ironlake_irq_reset;
4582 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4583 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4584 dev->driver->enable_vblank = ironlake_enable_vblank;
4585 dev->driver->disable_vblank = ironlake_disable_vblank;
4586 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4587 } else {
4588 if (INTEL_INFO(dev_priv)->gen == 2) {
4589 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4590 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4591 dev->driver->irq_handler = i8xx_irq_handler;
4592 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4593 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4594 dev->driver->irq_preinstall = i915_irq_preinstall;
4595 dev->driver->irq_postinstall = i915_irq_postinstall;
4596 dev->driver->irq_uninstall = i915_irq_uninstall;
4597 dev->driver->irq_handler = i915_irq_handler;
4598 } else {
4599 dev->driver->irq_preinstall = i965_irq_preinstall;
4600 dev->driver->irq_postinstall = i965_irq_postinstall;
4601 dev->driver->irq_uninstall = i965_irq_uninstall;
4602 dev->driver->irq_handler = i965_irq_handler;
4603 }
4604 if (I915_HAS_HOTPLUG(dev_priv))
4605 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4606 dev->driver->enable_vblank = i915_enable_vblank;
4607 dev->driver->disable_vblank = i915_disable_vblank;
4608 }
4609 }
4610
4611 /**
4612 * intel_irq_install - enables the hardware interrupt
4613 * @dev_priv: i915 device instance
4614 *
4615  * This function enables the hardware interrupt handling, but leaves hotplug
4616  * handling disabled. It is called after intel_irq_init().
4617 *
4618 * In the driver load and resume code we need working interrupts in a few places
4619 * but don't want to deal with the hassle of concurrent probe and hotplug
4620 * workers. Hence the split into this two-stage approach.
4621 */
4622 int intel_irq_install(struct drm_i915_private *dev_priv)
4623 {
4624 /*
4625 * We enable some interrupt sources in our postinstall hooks, so mark
4626 * interrupts as enabled _before_ actually enabling them to avoid
4627 * special cases in our ordering checks.
4628 */
4629 dev_priv->pm.irqs_enabled = true;
4630
4631 #ifdef __NetBSD__
4632 return drm_irq_install(dev_priv->dev);
4633 #else
4634 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4635 #endif
4636 }
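
/*
 * Sketch of the intended call ordering at driver load, as described in
 * the comments above (the actual call sites live outside this file and
 * are not shown here):
 *
 *	intel_irq_init(dev_priv);		// vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	// hook up the hardware interrupt
 *	// ... hotplug handling is enabled separately, once probing is done
 */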
4637
4638 /**
4639  * intel_irq_uninstall - finalizes all irq handling
4640 * @dev_priv: i915 device instance
4641 *
4642 * This stops interrupt and hotplug handling and unregisters and frees all
4643 * resources acquired in the init functions.
4644 */
4645 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4646 {
4647 drm_irq_uninstall(dev_priv->dev);
4648 intel_hpd_cancel_work(dev_priv);
4649 dev_priv->pm.irqs_enabled = false;
4650 }
4651
4652 /**
4653 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4654 * @dev_priv: i915 device instance
4655 *
4656 * This function is used to disable interrupts at runtime, both in the runtime
4657 * pm and the system suspend/resume code.
4658 */
4659 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4660 {
4661 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4662 dev_priv->pm.irqs_enabled = false;
4663 synchronize_irq(dev_priv->dev->irq);
4664 }
4665
4666 /**
4667 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4668 * @dev_priv: i915 device instance
4669 *
4670 * This function is used to enable interrupts at runtime, both in the runtime
4671 * pm and the system suspend/resume code.
4672 */
4673 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4674 {
4675 dev_priv->pm.irqs_enabled = true;
4676 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4677 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4678 }
4679