1 /* $NetBSD: i915_drv.c,v 1.6.30.2 2020/04/08 14:08:23 martin Exp $ */
2
3 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
4 */
5 /*
6 *
7 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * All Rights Reserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sub license, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial portions
20 * of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
26 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: i915_drv.c,v 1.6.30.2 2020/04/08 14:08:23 martin Exp $");
34
35 #include <linux/device.h>
36 #include <linux/acpi.h>
37 #include <drm/drmP.h>
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_trace.h"
41 #include "intel_drv.h"
42
43 #include <linux/console.h>
44 #include <linux/module.h>
45 #include <linux/pm_runtime.h>
46 #include <drm/drm_crtc_helper.h>
47
48 static struct drm_driver driver;
49
50 #ifdef __NetBSD__
51 /* XXX Kludge to expose this to NetBSD driver attachment goop. */
52 struct drm_driver *const i915_drm_driver = &driver;
53 #endif
54
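/*
 * The *_PIPEOFFSETS and *_CURSOR_OFFSETS macros below populate the per-pipe,
 * per-transcoder, per-palette and per-cursor MMIO offset tables in struct
 * intel_device_info; the display register helpers index these tables, so each
 * platform family only has to describe its register layout once.
 */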
55 #define GEN_DEFAULT_PIPEOFFSETS \
56 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
57 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
58 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
59 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
60 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
61
62 #define GEN_CHV_PIPEOFFSETS \
63 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
64 CHV_PIPE_C_OFFSET }, \
65 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
66 CHV_TRANSCODER_C_OFFSET, }, \
67 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
68 CHV_PALETTE_C_OFFSET }
69
70 #define CURSOR_OFFSETS \
71 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
72
73 #define IVB_CURSOR_OFFSETS \
74 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
75
76 static const struct intel_device_info intel_i830_info = {
77 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
78 .has_overlay = 1, .overlay_needs_physical = 1,
79 .ring_mask = RENDER_RING,
80 GEN_DEFAULT_PIPEOFFSETS,
81 CURSOR_OFFSETS,
82 };
83
84 static const struct intel_device_info intel_845g_info = {
85 .gen = 2, .num_pipes = 1,
86 .has_overlay = 1, .overlay_needs_physical = 1,
87 .ring_mask = RENDER_RING,
88 GEN_DEFAULT_PIPEOFFSETS,
89 CURSOR_OFFSETS,
90 };
91
92 static const struct intel_device_info intel_i85x_info = {
93 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
94 .cursor_needs_physical = 1,
95 .has_overlay = 1, .overlay_needs_physical = 1,
96 .has_fbc = 1,
97 .ring_mask = RENDER_RING,
98 GEN_DEFAULT_PIPEOFFSETS,
99 CURSOR_OFFSETS,
100 };
101
102 static const struct intel_device_info intel_i865g_info = {
103 .gen = 2, .num_pipes = 1,
104 .has_overlay = 1, .overlay_needs_physical = 1,
105 .ring_mask = RENDER_RING,
106 GEN_DEFAULT_PIPEOFFSETS,
107 CURSOR_OFFSETS,
108 };
109
110 static const struct intel_device_info intel_i915g_info = {
111 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
112 .has_overlay = 1, .overlay_needs_physical = 1,
113 .ring_mask = RENDER_RING,
114 GEN_DEFAULT_PIPEOFFSETS,
115 CURSOR_OFFSETS,
116 };
117 static const struct intel_device_info intel_i915gm_info = {
118 .gen = 3, .is_mobile = 1, .num_pipes = 2,
119 .cursor_needs_physical = 1,
120 .has_overlay = 1, .overlay_needs_physical = 1,
121 .supports_tv = 1,
122 .has_fbc = 1,
123 .ring_mask = RENDER_RING,
124 GEN_DEFAULT_PIPEOFFSETS,
125 CURSOR_OFFSETS,
126 };
127 static const struct intel_device_info intel_i945g_info = {
128 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
129 .has_overlay = 1, .overlay_needs_physical = 1,
130 .ring_mask = RENDER_RING,
131 GEN_DEFAULT_PIPEOFFSETS,
132 CURSOR_OFFSETS,
133 };
134 static const struct intel_device_info intel_i945gm_info = {
135 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
136 .has_hotplug = 1, .cursor_needs_physical = 1,
137 .has_overlay = 1, .overlay_needs_physical = 1,
138 .supports_tv = 1,
139 .has_fbc = 1,
140 .ring_mask = RENDER_RING,
141 GEN_DEFAULT_PIPEOFFSETS,
142 CURSOR_OFFSETS,
143 };
144
145 static const struct intel_device_info intel_i965g_info = {
146 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
147 .has_hotplug = 1,
148 .has_overlay = 1,
149 .ring_mask = RENDER_RING,
150 GEN_DEFAULT_PIPEOFFSETS,
151 CURSOR_OFFSETS,
152 };
153
154 static const struct intel_device_info intel_i965gm_info = {
155 .gen = 4, .is_crestline = 1, .num_pipes = 2,
156 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
157 .has_overlay = 1,
158 .supports_tv = 1,
159 .ring_mask = RENDER_RING,
160 GEN_DEFAULT_PIPEOFFSETS,
161 CURSOR_OFFSETS,
162 };
163
164 static const struct intel_device_info intel_g33_info = {
165 .gen = 3, .is_g33 = 1, .num_pipes = 2,
166 .need_gfx_hws = 1, .has_hotplug = 1,
167 .has_overlay = 1,
168 .ring_mask = RENDER_RING,
169 GEN_DEFAULT_PIPEOFFSETS,
170 CURSOR_OFFSETS,
171 };
172
173 static const struct intel_device_info intel_g45_info = {
174 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
175 .has_pipe_cxsr = 1, .has_hotplug = 1,
176 .ring_mask = RENDER_RING | BSD_RING,
177 GEN_DEFAULT_PIPEOFFSETS,
178 CURSOR_OFFSETS,
179 };
180
181 static const struct intel_device_info intel_gm45_info = {
182 .gen = 4, .is_g4x = 1, .num_pipes = 2,
183 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
184 .has_pipe_cxsr = 1, .has_hotplug = 1,
185 .supports_tv = 1,
186 .ring_mask = RENDER_RING | BSD_RING,
187 GEN_DEFAULT_PIPEOFFSETS,
188 CURSOR_OFFSETS,
189 };
190
191 static const struct intel_device_info intel_pineview_info = {
192 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
193 .need_gfx_hws = 1, .has_hotplug = 1,
194 .has_overlay = 1,
195 GEN_DEFAULT_PIPEOFFSETS,
196 CURSOR_OFFSETS,
197 };
198
199 static const struct intel_device_info intel_ironlake_d_info = {
200 .gen = 5, .num_pipes = 2,
201 .need_gfx_hws = 1, .has_hotplug = 1,
202 .ring_mask = RENDER_RING | BSD_RING,
203 GEN_DEFAULT_PIPEOFFSETS,
204 CURSOR_OFFSETS,
205 };
206
207 static const struct intel_device_info intel_ironlake_m_info = {
208 .gen = 5, .is_mobile = 1, .num_pipes = 2,
209 .need_gfx_hws = 1, .has_hotplug = 1,
210 .has_fbc = 1,
211 .ring_mask = RENDER_RING | BSD_RING,
212 GEN_DEFAULT_PIPEOFFSETS,
213 CURSOR_OFFSETS,
214 };
215
216 static const struct intel_device_info intel_sandybridge_d_info = {
217 .gen = 6, .num_pipes = 2,
218 .need_gfx_hws = 1, .has_hotplug = 1,
219 .has_fbc = 1,
220 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
221 .has_llc = 1,
222 GEN_DEFAULT_PIPEOFFSETS,
223 CURSOR_OFFSETS,
224 };
225
226 static const struct intel_device_info intel_sandybridge_m_info = {
227 .gen = 6, .is_mobile = 1, .num_pipes = 2,
228 .need_gfx_hws = 1, .has_hotplug = 1,
229 .has_fbc = 1,
230 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
231 .has_llc = 1,
232 GEN_DEFAULT_PIPEOFFSETS,
233 CURSOR_OFFSETS,
234 };
235
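/*
 * Shared gen7 baseline used by the Ivybridge, Valleyview and Haswell info
 * structures below.  Fields may be overridden by later designated
 * initializers ("last one wins", as noted where structures rely on it).
 */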
236 #define GEN7_FEATURES \
237 .gen = 7, .num_pipes = 3, \
238 .need_gfx_hws = 1, .has_hotplug = 1, \
239 .has_fbc = 1, \
240 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
241 .has_llc = 1
242
243 static const struct intel_device_info intel_ivybridge_d_info = {
244 GEN7_FEATURES,
245 .is_ivybridge = 1,
246 GEN_DEFAULT_PIPEOFFSETS,
247 IVB_CURSOR_OFFSETS,
248 };
249
250 static const struct intel_device_info intel_ivybridge_m_info = {
251 GEN7_FEATURES,
252 .is_ivybridge = 1,
253 .is_mobile = 1,
254 GEN_DEFAULT_PIPEOFFSETS,
255 IVB_CURSOR_OFFSETS,
256 };
257
258 static const struct intel_device_info intel_ivybridge_q_info = {
259 GEN7_FEATURES,
260 .is_ivybridge = 1,
261 .num_pipes = 0, /* legal, last one wins */
262 GEN_DEFAULT_PIPEOFFSETS,
263 IVB_CURSOR_OFFSETS,
264 };
265
266 static const struct intel_device_info intel_valleyview_m_info = {
267 GEN7_FEATURES,
268 .is_mobile = 1,
269 .num_pipes = 2,
270 .is_valleyview = 1,
271 .display_mmio_offset = VLV_DISPLAY_BASE,
272 .has_fbc = 0, /* legal, last one wins */
273 .has_llc = 0, /* legal, last one wins */
274 GEN_DEFAULT_PIPEOFFSETS,
275 CURSOR_OFFSETS,
276 };
277
278 static const struct intel_device_info intel_valleyview_d_info = {
279 GEN7_FEATURES,
280 .num_pipes = 2,
281 .is_valleyview = 1,
282 .display_mmio_offset = VLV_DISPLAY_BASE,
283 .has_fbc = 0, /* legal, last one wins */
284 .has_llc = 0, /* legal, last one wins */
285 GEN_DEFAULT_PIPEOFFSETS,
286 CURSOR_OFFSETS,
287 };
288
289 static const struct intel_device_info intel_haswell_d_info = {
290 GEN7_FEATURES,
291 .is_haswell = 1,
292 .has_ddi = 1,
293 .has_fpga_dbg = 1,
294 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
295 GEN_DEFAULT_PIPEOFFSETS,
296 IVB_CURSOR_OFFSETS,
297 };
298
299 static const struct intel_device_info intel_haswell_m_info = {
300 GEN7_FEATURES,
301 .is_haswell = 1,
302 .is_mobile = 1,
303 .has_ddi = 1,
304 .has_fpga_dbg = 1,
305 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
306 GEN_DEFAULT_PIPEOFFSETS,
307 IVB_CURSOR_OFFSETS,
308 };
309
310 static const struct intel_device_info intel_broadwell_d_info = {
311 .gen = 8, .num_pipes = 3,
312 .need_gfx_hws = 1, .has_hotplug = 1,
313 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
314 .has_llc = 1,
315 .has_ddi = 1,
316 .has_fpga_dbg = 1,
317 .has_fbc = 1,
318 GEN_DEFAULT_PIPEOFFSETS,
319 IVB_CURSOR_OFFSETS,
320 };
321
322 static const struct intel_device_info intel_broadwell_m_info = {
323 .gen = 8, .is_mobile = 1, .num_pipes = 3,
324 .need_gfx_hws = 1, .has_hotplug = 1,
325 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
326 .has_llc = 1,
327 .has_ddi = 1,
328 .has_fpga_dbg = 1,
329 .has_fbc = 1,
330 GEN_DEFAULT_PIPEOFFSETS,
331 IVB_CURSOR_OFFSETS,
332 };
333
334 static const struct intel_device_info intel_broadwell_gt3d_info = {
335 .gen = 8, .num_pipes = 3,
336 .need_gfx_hws = 1, .has_hotplug = 1,
337 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
338 .has_llc = 1,
339 .has_ddi = 1,
340 .has_fpga_dbg = 1,
341 .has_fbc = 1,
342 GEN_DEFAULT_PIPEOFFSETS,
343 IVB_CURSOR_OFFSETS,
344 };
345
346 static const struct intel_device_info intel_broadwell_gt3m_info = {
347 .gen = 8, .is_mobile = 1, .num_pipes = 3,
348 .need_gfx_hws = 1, .has_hotplug = 1,
349 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
350 .has_llc = 1,
351 .has_ddi = 1,
352 .has_fpga_dbg = 1,
353 .has_fbc = 1,
354 GEN_DEFAULT_PIPEOFFSETS,
355 IVB_CURSOR_OFFSETS,
356 };
357
358 static const struct intel_device_info intel_cherryview_info = {
359 .gen = 8, .num_pipes = 3,
360 .need_gfx_hws = 1, .has_hotplug = 1,
361 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
362 .is_valleyview = 1,
363 .display_mmio_offset = VLV_DISPLAY_BASE,
364 GEN_CHV_PIPEOFFSETS,
365 CURSOR_OFFSETS,
366 };
367
368 static const struct intel_device_info intel_skylake_info = {
369 .is_skylake = 1,
370 .gen = 9, .num_pipes = 3,
371 .need_gfx_hws = 1, .has_hotplug = 1,
372 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
373 .has_llc = 1,
374 .has_ddi = 1,
375 .has_fpga_dbg = 1,
376 .has_fbc = 1,
377 GEN_DEFAULT_PIPEOFFSETS,
378 IVB_CURSOR_OFFSETS,
379 };
380
381 static const struct intel_device_info intel_skylake_gt3_info = {
382 .is_skylake = 1,
383 .gen = 9, .num_pipes = 3,
384 .need_gfx_hws = 1, .has_hotplug = 1,
385 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
386 .has_llc = 1,
387 .has_ddi = 1,
388 .has_fpga_dbg = 1,
389 .has_fbc = 1,
390 GEN_DEFAULT_PIPEOFFSETS,
391 IVB_CURSOR_OFFSETS,
392 };
393
394 static const struct intel_device_info intel_broxton_info = {
395 .is_preliminary = 1,
396 .gen = 9,
397 .need_gfx_hws = 1, .has_hotplug = 1,
398 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
399 .num_pipes = 3,
400 .has_ddi = 1,
401 .has_fpga_dbg = 1,
402 .has_fbc = 1,
403 GEN_DEFAULT_PIPEOFFSETS,
404 IVB_CURSOR_OFFSETS,
405 };
406
407 static const struct intel_device_info intel_kabylake_info = {
408 .is_kabylake = 1,
409 .gen = 9,
410 .num_pipes = 3,
411 .need_gfx_hws = 1, .has_hotplug = 1,
412 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
413 .has_llc = 1,
414 .has_ddi = 1,
415 .has_fpga_dbg = 1,
416 .has_fbc = 1,
417 GEN_DEFAULT_PIPEOFFSETS,
418 IVB_CURSOR_OFFSETS,
419 };
420
421 static const struct intel_device_info intel_kabylake_gt3_info = {
422 .is_kabylake = 1,
423 .gen = 9,
424 .num_pipes = 3,
425 .need_gfx_hws = 1, .has_hotplug = 1,
426 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
427 .has_llc = 1,
428 .has_ddi = 1,
429 .has_fpga_dbg = 1,
430 .has_fbc = 1,
431 GEN_DEFAULT_PIPEOFFSETS,
432 IVB_CURSOR_OFFSETS,
433 };
434
435 /*
436  * Make sure any device ID matches here go from most specific to most
437  * general. For example, since the Quanta match is based on the subsystem
438 * and subvendor IDs, we need it to come before the more general IVB
439 * PCI ID matches, otherwise we'll use the wrong info struct above.
440 */
441 #define INTEL_PCI_IDS \
442 INTEL_I830_IDS(&intel_i830_info), \
443 INTEL_I845G_IDS(&intel_845g_info), \
444 INTEL_I85X_IDS(&intel_i85x_info), \
445 INTEL_I865G_IDS(&intel_i865g_info), \
446 INTEL_I915G_IDS(&intel_i915g_info), \
447 INTEL_I915GM_IDS(&intel_i915gm_info), \
448 INTEL_I945G_IDS(&intel_i945g_info), \
449 INTEL_I945GM_IDS(&intel_i945gm_info), \
450 INTEL_I965G_IDS(&intel_i965g_info), \
451 INTEL_G33_IDS(&intel_g33_info), \
452 INTEL_I965GM_IDS(&intel_i965gm_info), \
453 INTEL_GM45_IDS(&intel_gm45_info), \
454 INTEL_G45_IDS(&intel_g45_info), \
455 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
456 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
457 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
458 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
459 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
460 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
461 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
462 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
463 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
464 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
465 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
466 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
467 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
468 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
469 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
470 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
471 INTEL_CHV_IDS(&intel_cherryview_info), \
472 INTEL_SKL_GT1_IDS(&intel_skylake_info), \
473 INTEL_SKL_GT2_IDS(&intel_skylake_info), \
474 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
475 INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), \
476 INTEL_BXT_IDS(&intel_broxton_info), \
477 INTEL_KBL_GT1_IDS(&intel_kabylake_info), \
478 INTEL_KBL_GT2_IDS(&intel_kabylake_info), \
479 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), \
480 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info)
481
482
483 static const struct pci_device_id pciidlist[] = { /* aka */
484 INTEL_PCI_IDS,
485 {0, 0, 0, 0, 0, 0, 0}
486 };
487
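/*
 * On Linux this exports pciidlist for PCI module autoloading; the NetBSD
 * attachment code instead consumes the i915_device_ids/i915_n_device_ids
 * aliases defined below.
 */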
488 MODULE_DEVICE_TABLE(pci, pciidlist);
489
490 static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
491 {
492 enum intel_pch ret = PCH_NOP;
493
494 /*
495 * In a virtualized passthrough environment we can be in a
496 * setup where the ISA bridge is not able to be passed through.
497 * In this case, a south bridge can be emulated and we have to
498 * make an educated guess as to which PCH is really there.
499 */
500
501 if (IS_GEN5(dev)) {
502 ret = PCH_IBX;
503 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
504 } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
505 ret = PCH_CPT;
506 DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
507 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
508 ret = PCH_LPT;
509 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
510 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
511 ret = PCH_SPT;
512 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
513 }
514
515 return ret;
516 }
517
518 #ifdef __NetBSD__
519 /* XXX Kludge to expose this to NetBSD driver attachment goop. */
520 const struct pci_device_id *const i915_device_ids = pciidlist;
521 const size_t i915_n_device_ids = __arraycount(pciidlist);
522 #endif
523
524 void intel_detect_pch(struct drm_device *dev)
525 {
526 struct drm_i915_private *dev_priv = dev->dev_private;
527 struct pci_dev *pch = NULL;
528
529 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
530 * (which really amounts to a PCH but no South Display).
531 */
532 if (INTEL_INFO(dev)->num_pipes == 0) {
533 dev_priv->pch_type = PCH_NOP;
534 return;
535 }
536
537 /*
538 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
539 * make graphics device passthrough easy for the VMM, which then only
540 * needs to expose the ISA bridge to let the driver know the real
541 * hardware underneath. This is a requirement from the virtualization team.
542 *
543 * In some virtualized environments (e.g. XEN), there may be an
544 * irrelevant ISA bridge in the system. To work reliably, we should
545 * scan through all the ISA bridge devices and check for the first
546 * match, instead of only checking the first one.
547 */
548 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
549 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
550 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
551 dev_priv->pch_id = id;
552
553 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
554 dev_priv->pch_type = PCH_IBX;
555 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
556 WARN_ON(!IS_GEN5(dev));
557 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
558 dev_priv->pch_type = PCH_CPT;
559 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
560 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
561 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
562 /* PantherPoint is CPT compatible */
563 dev_priv->pch_type = PCH_CPT;
564 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
565 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
566 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
567 dev_priv->pch_type = PCH_LPT;
568 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
569 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
570 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
571 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
572 dev_priv->pch_type = PCH_LPT;
573 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
574 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
575 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
576 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
577 dev_priv->pch_type = PCH_SPT;
578 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
579 WARN_ON(!IS_SKYLAKE(dev) &&
580 !IS_KABYLAKE(dev));
581 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
582 dev_priv->pch_type = PCH_SPT;
583 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
584 WARN_ON(!IS_SKYLAKE(dev) &&
585 !IS_KABYLAKE(dev));
586 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
587 dev_priv->pch_type = PCH_KBP;
588 DRM_DEBUG_KMS("Found KabyPoint PCH\n");
589 WARN_ON(!IS_KABYLAKE(dev_priv));
590 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
591 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
592 pch->subsystem_vendor == 0x1af4 &&
593 pch->subsystem_device == 0x1100)) {
594 dev_priv->pch_type = intel_virt_detect_pch(dev);
595 } else
596 continue;
597
598 break;
599 }
600 }
601 if (!pch)
602 DRM_DEBUG_KMS("No PCH found.\n");
603
604 pci_dev_put(pch);
605 }
606
607 bool i915_semaphore_is_enabled(struct drm_device *dev)
608 {
609 if (INTEL_INFO(dev)->gen < 6)
610 return false;
611
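/*
 * i915.semaphores is a module parameter: a negative value (the default)
 * selects the per-chip heuristics below, while 0/1 forces semaphores
 * off or on.
 */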
612 if (i915.semaphores >= 0)
613 return i915.semaphores;
614
615 /* TODO: make semaphores and Execlists play nicely together */
616 if (i915.enable_execlists)
617 return false;
618
619 /* Until we get further testing... */
620 if (IS_GEN8(dev))
621 return false;
622
623 #ifdef CONFIG_INTEL_IOMMU
624 /* Enable semaphores on SNB when IO remapping is off */
625 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
626 return false;
627 #endif
628
629 return true;
630 }
631
632 void i915_firmware_load_error_print(const char *fw_path, int err)
633 {
634 DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
635
636 /*
637 * If the reason is not known assume -ENOENT since that's the most
638 * usual failure mode.
639 */
640 if (!err)
641 err = -ENOENT;
642
643 if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
644 return;
645
646 DRM_ERROR(
647 "The driver is built-in, so to load the firmware you need to\n"
648 "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
649 "in your initrd/initramfs image.\n");
650 }
651
652 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
653 {
654 struct drm_device *dev = dev_priv->dev;
655 struct drm_encoder *encoder;
656
657 drm_modeset_lock_all(dev);
658 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
659 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
660
661 if (intel_encoder->suspend)
662 intel_encoder->suspend(intel_encoder);
663 }
664 drm_modeset_unlock_all(dev);
665 }
666
667 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
668 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
669 bool rpm_resume);
670 static int skl_resume_prepare(struct drm_i915_private *dev_priv);
671 static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
672
673
674 int i915_drm_suspend(struct drm_device *dev)
675 {
676 struct drm_i915_private *dev_priv = dev->dev_private;
677 pci_power_t opregion_target_state;
678 int error;
679
680 /* ignore lid events during suspend */
681 mutex_lock(&dev_priv->modeset_restore_lock);
682 dev_priv->modeset_restore = MODESET_SUSPENDED;
683 mutex_unlock(&dev_priv->modeset_restore_lock);
684
685 /* We do a lot of poking in a lot of registers; make sure they work
686 * properly. */
687 intel_display_set_init_power(dev_priv, true);
688
689 drm_kms_helper_poll_disable(dev);
690
691 #ifndef __NetBSD__ /* pmf handles this for us. */
692 pci_save_state(dev->pdev);
693 #endif
694
695 error = i915_gem_suspend(dev);
696 if (error) {
697 #ifdef __NetBSD__
698 dev_err(pci_dev_dev(dev->pdev),
699 "GEM idle failed, resume might fail\n");
700 #else
701 dev_err(&dev->pdev->dev,
702 "GEM idle failed, resume might fail\n");
703 #endif
704 return error;
705 }
706
707 intel_guc_suspend(dev);
708
709 intel_suspend_gt_powersave(dev);
710
711 /*
712 * Disable CRTCs directly since we want to preserve sw state
713 * for _thaw. Also, power gate the CRTC power wells.
714 */
715 drm_modeset_lock_all(dev);
716 intel_display_suspend(dev);
717 drm_modeset_unlock_all(dev);
718
719 intel_dp_mst_suspend(dev);
720
721 intel_runtime_pm_disable_interrupts(dev_priv);
722 intel_hpd_cancel_work(dev_priv);
723
724 intel_suspend_encoders(dev_priv);
725
726 intel_suspend_hw(dev);
727
728 i915_gem_suspend_gtt_mappings(dev);
729
730 i915_save_state(dev);
731
732 opregion_target_state = PCI_D3cold;
733 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
734 if (acpi_target_system_state() < ACPI_STATE_S3)
735 opregion_target_state = PCI_D1;
736 #endif
737 intel_opregion_notify_adapter(dev, opregion_target_state);
738
739 intel_uncore_forcewake_reset(dev, false);
740 intel_opregion_fini(dev);
741
742 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
743
744 dev_priv->suspend_count++;
745
746 intel_display_set_init_power(dev_priv, false);
747
748 return 0;
749 }
750
751 int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
752 {
753 struct drm_i915_private *dev_priv = drm_dev->dev_private;
754 int ret;
755
756 ret = intel_suspend_complete(dev_priv);
757
758 if (ret) {
759 DRM_ERROR("Suspend complete failed: %d\n", ret);
760
761 return ret;
762 }
763
764 i915_rc6_ctx_wa_suspend(dev_priv);
765
766 #ifndef __NetBSD__ /* pmf handles this for us. */
767 pci_disable_device(drm_dev->pdev);
768 /*
769 * During hibernation on some platforms the BIOS may try to access
770 * the device even though it's already in D3 and hang the machine. So
771 * leave the device in D0 on those platforms and hope the BIOS will
772 * power down the device properly. The issue was seen on multiple old
773 * GENs with different BIOS vendors, so having an explicit blacklist
774 * is impractical; apply the workaround on everything pre GEN6. The
775 * platforms where the issue was seen:
776 * Lenovo Thinkpad X301, X61s, X60, T60, X41
777 * Fujitsu FSC S7110
778 * Acer Aspire 1830T
779 */
780 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
781 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
782 #endif
783
784 return 0;
785 }
786
787 int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
788 {
789 int error;
790
791 if (!dev || !dev->dev_private) {
792 DRM_ERROR("dev: %p\n", dev);
793 DRM_ERROR("DRM not initialized, aborting suspend.\n");
794 return -ENODEV;
795 }
796
797 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
798 state.event != PM_EVENT_FREEZE))
799 return -EINVAL;
800
801 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
802 return 0;
803
804 error = i915_drm_suspend(dev);
805 if (error)
806 return error;
807
808 return i915_drm_suspend_late(dev, false);
809 }
810
811 int i915_drm_resume(struct drm_device *dev)
812 {
813 struct drm_i915_private *dev_priv = dev->dev_private;
814
815 mutex_lock(&dev->struct_mutex);
816 i915_gem_restore_gtt_mappings(dev);
817 mutex_unlock(&dev->struct_mutex);
818
819 i915_restore_state(dev);
820 intel_opregion_setup(dev);
821
822 intel_init_pch_refclk(dev);
823 drm_mode_config_reset(dev);
824
825 /*
826 * Interrupts have to be enabled before any batches are run. If not the
827 * GPU will hang. i915_gem_init_hw() will initiate batches to
828 * update/restore the context.
829 *
830 * Modeset enabling in intel_modeset_init_hw() also needs working
831 * interrupts.
832 */
833 intel_runtime_pm_enable_interrupts(dev_priv);
834
835 mutex_lock(&dev->struct_mutex);
836 if (i915_gem_init_hw(dev)) {
837 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
838 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
839 }
840 mutex_unlock(&dev->struct_mutex);
841
842 intel_guc_resume(dev);
843
844 intel_modeset_init_hw(dev);
845
846 spin_lock_irq(&dev_priv->irq_lock);
847 if (dev_priv->display.hpd_irq_setup)
848 dev_priv->display.hpd_irq_setup(dev);
849 spin_unlock_irq(&dev_priv->irq_lock);
850
851 drm_modeset_lock_all(dev);
852 intel_display_resume(dev);
853 drm_modeset_unlock_all(dev);
854
855 intel_dp_mst_resume(dev);
856
857 /*
858 * ... but also need to make sure that hotplug processing
859 * doesn't cause havoc. Like in the driver load code we don't
860 * bother with the tiny race here where we might lose hotplug
861 * notifications.
862 */
863 intel_hpd_init(dev_priv);
864 /* Config may have changed between suspend and resume */
865 drm_helper_hpd_irq_event(dev);
866
867 intel_opregion_init(dev);
868
869 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
870
871 mutex_lock(&dev_priv->modeset_restore_lock);
872 dev_priv->modeset_restore = MODESET_DONE;
873 mutex_unlock(&dev_priv->modeset_restore_lock);
874
875 intel_opregion_notify_adapter(dev, PCI_D0);
876
877 drm_kms_helper_poll_enable(dev);
878
879 return 0;
880 }
881
882 int i915_drm_resume_early(struct drm_device *dev)
883 {
884 struct drm_i915_private *dev_priv = dev->dev_private;
885 int ret = 0;
886
887 #ifndef __NetBSD__ /* pmf handles this for us. */
888 /*
889 * We have a resume ordering issue with the snd-hda driver also
890 * requiring our device to be powered up. Due to the lack of a
891 * parent/child relationship we currently solve this with an early
892 * resume hook.
893 *
894 * FIXME: This should be solved with a special hdmi sink device or
895 * similar so that power domains can be employed.
896 */
897 if (pci_enable_device(dev->pdev))
898 return -EIO;
899 #endif
900
901 /* XXX pmf probably handles this for us too. */
902 pci_set_master(dev->pdev);
903
904 if (IS_VALLEYVIEW(dev_priv))
905 ret = vlv_resume_prepare(dev_priv, false);
906 if (ret)
907 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
908 ret);
909
910 intel_uncore_early_sanitize(dev, true);
911
912 if (IS_BROXTON(dev))
913 ret = bxt_resume_prepare(dev_priv);
914 else if (IS_SKYLAKE(dev_priv))
915 ret = skl_resume_prepare(dev_priv);
916 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
917 hsw_disable_pc8(dev_priv);
918
919 intel_uncore_sanitize(dev);
920 intel_power_domains_init_hw(dev_priv);
921
922 i915_rc6_ctx_wa_resume(dev_priv);
923
924 return ret;
925 }
926
927 int i915_resume_switcheroo(struct drm_device *dev)
928 {
929 int ret;
930
931 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
932 return 0;
933
934 ret = i915_drm_resume_early(dev);
935 if (ret)
936 return ret;
937
938 return i915_drm_resume(dev);
939 }
940
941 /**
942 * i915_reset - reset chip after a hang
943 * @dev: drm device to reset
944 *
945 * Reset the chip. Useful if a hang is detected. Returns zero on successful
946 * reset or otherwise an error code.
947 *
948 * Procedure is fairly simple:
949 * - reset the chip using the reset reg
950 * - re-init context state
951 * - re-init hardware status page
952 * - re-init ring buffer
953 * - re-init interrupt state
954 * - re-init display
955 */
956 int i915_reset(struct drm_device *dev)
957 {
958 struct drm_i915_private *dev_priv = dev->dev_private;
959 bool simulated;
960 int ret;
961
962 intel_reset_gt_powersave(dev);
963
964 mutex_lock(&dev->struct_mutex);
965
966 i915_gem_reset(dev);
967
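/*
 * stop_rings is only expected to be non-zero when a hang has been injected
 * through the ring-stop debug interface, i.e. the hang was simulated.
 */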
968 simulated = dev_priv->gpu_error.stop_rings != 0;
969
970 ret = intel_gpu_reset(dev);
971
972 /* Also reset the gpu hangman. */
973 if (simulated) {
974 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
975 dev_priv->gpu_error.stop_rings = 0;
976 if (ret == -ENODEV) {
977 DRM_INFO("Reset not implemented, but ignoring "
978 "error for simulated gpu hangs\n");
979 ret = 0;
980 }
981 }
982
983 if (i915_stop_ring_allow_warn(dev_priv))
984 pr_notice("drm/i915: Resetting chip after gpu hang\n");
985
986 if (ret) {
987 DRM_ERROR("Failed to reset chip: %i\n", ret);
988 mutex_unlock(&dev->struct_mutex);
989 return ret;
990 }
991
992 intel_overlay_reset(dev_priv);
993
994 /* Ok, now get things going again... */
995
996 /*
997 * Everything depends on having the GTT running, so we need to start
998 * there. Fortunately we don't need to do this unless we reset the
999 * chip at a PCI level.
1000 *
1001 * Next we need to restore the context, but we don't use those
1002 * yet either...
1003 *
1004 * Ring buffer needs to be re-initialized in the KMS case, or if X
1005 * was running at the time of the reset (i.e. we weren't VT
1006 * switched away).
1007 */
1008
1009 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
1010 dev_priv->gpu_error.reload_in_reset = true;
1011
1012 ret = i915_gem_init_hw(dev);
1013
1014 dev_priv->gpu_error.reload_in_reset = false;
1015
1016 mutex_unlock(&dev->struct_mutex);
1017 if (ret) {
1018 DRM_ERROR("Failed hw init on reset %d\n", ret);
1019 return ret;
1020 }
1021
1022 /*
1023 * rps/rc6 re-init is necessary to restore state lost after the
1024 * reset and the re-install of gt irqs. Skip for ironlake per
1025 * previous concerns that it doesn't respond well to some forms
1026 * of re-init after reset.
1027 */
1028 if (INTEL_INFO(dev)->gen > 5)
1029 intel_enable_gt_powersave(dev);
1030
1031 return 0;
1032 }
1033
1034 #ifdef __NetBSD__
1035
1036 static const struct uvm_pagerops i915_gem_uvm_ops = {
1037 .pgo_reference = drm_gem_pager_reference,
1038 .pgo_detach = drm_gem_pager_detach,
1039 .pgo_fault = i915_gem_fault,
1040 };
1041
1042 #else
1043
1044 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1045 {
1046 struct intel_device_info *intel_info =
1047 (struct intel_device_info *) ent->driver_data;
1048
1049 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
1050 DRM_INFO("This hardware requires preliminary hardware support.\n"
1051 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
1052 return -ENODEV;
1053 }
1054
1055 /* Only bind to function 0 of the device. Early generations
1056 * used function 1 as a placeholder for multi-head. This causes
1057 * us confusion instead, especially on the systems where both
1058 * functions have the same PCI-ID!
1059 */
1060 if (PCI_FUNC(pdev->devfn))
1061 return -ENODEV;
1062
1063 return drm_get_pci_dev(pdev, ent, &driver);
1064 }
1065
1066 static void
1067 i915_pci_remove(struct pci_dev *pdev)
1068 {
1069 struct drm_device *dev = pci_get_drvdata(pdev);
1070
1071 drm_put_dev(dev);
1072 }
1073 #endif
1074
1075 #ifndef __NetBSD__
1076 static int i915_pm_suspend(struct device *dev)
1077 {
1078 struct pci_dev *pdev = to_pci_dev(dev);
1079 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1080
1081 if (!drm_dev || !drm_dev->dev_private) {
1082 dev_err(dev, "DRM not initialized, aborting suspend.\n");
1083 return -ENODEV;
1084 }
1085
1086 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1087 return 0;
1088
1089 return i915_drm_suspend(drm_dev);
1090 }
1091
1092 static int i915_pm_suspend_late(struct device *dev)
1093 {
1094 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1095
1096 /*
1097 * We have a suspend ordering issue with the snd-hda driver also
1098 * requiring our device to be powered up. Due to the lack of a
1099 * parent/child relationship we currently solve this with a late
1100 * suspend hook.
1101 *
1102 * FIXME: This should be solved with a special hdmi sink device or
1103 * similar so that power domains can be employed.
1104 */
1105 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1106 return 0;
1107
1108 return i915_drm_suspend_late(drm_dev, false);
1109 }
1110
1111 static int i915_pm_poweroff_late(struct device *dev)
1112 {
1113 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1114
1115 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1116 return 0;
1117
1118 return i915_drm_suspend_late(drm_dev, true);
1119 }
1120
1121 static int i915_pm_resume_early(struct device *dev)
1122 {
1123 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1124
1125 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1126 return 0;
1127
1128 return i915_drm_resume_early(drm_dev);
1129 }
1130
1131 static int i915_pm_resume(struct device *dev)
1132 {
1133 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1134
1135 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1136 return 0;
1137
1138 return i915_drm_resume(drm_dev);
1139 }
1140 #endif
1141
1142 static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1143 {
1144 /* Enabling DC6 is not a hard requirement to enter runtime D3 */
1145
1146 skl_uninit_cdclk(dev_priv);
1147
1148 return 0;
1149 }
1150
1151 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1152 {
1153 hsw_enable_pc8(dev_priv);
1154
1155 return 0;
1156 }
1157
1158 static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
1159 {
1160 struct drm_device *dev = dev_priv->dev;
1161
1162 /* TODO: when DC5 support is added disable DC5 here. */
1163
1164 broxton_ddi_phy_uninit(dev);
1165 broxton_uninit_cdclk(dev);
1166 bxt_enable_dc9(dev_priv);
1167
1168 return 0;
1169 }
1170
1171 static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1172 {
1173 struct drm_device *dev = dev_priv->dev;
1174
1175 /* TODO: when CSR FW support is added make sure the FW is loaded */
1176
1177 bxt_disable_dc9(dev_priv);
1178
1179 /*
1180 * TODO: when DC5 support is added enable DC5 here if the CSR FW
1181 * is available.
1182 */
1183 broxton_init_cdclk(dev);
1184 broxton_ddi_phy_init(dev);
1185 intel_prepare_ddi(dev);
1186
1187 return 0;
1188 }
1189
1190 static int skl_resume_prepare(struct drm_i915_private *dev_priv)
1191 {
1192 struct drm_device *dev = dev_priv->dev;
1193
1194 skl_init_cdclk(dev_priv);
1195 intel_csr_load_program(dev);
1196
1197 return 0;
1198 }
1199
1200 /*
1201 * Save all Gunit registers that may be lost after a D3 and a subsequent
1202 * S0i[R123] transition. The list of registers needing a save/restore is
1203 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
1204 * registers in the following way:
1205 * - Driver: saved/restored by the driver
1206 * - Punit : saved/restored by the Punit firmware
1207 * - No, w/o marking: no need to save/restore, since the register is R/O or
1208 * used internally by the HW in a way that doesn't depend on
1209 * keeping the content across a suspend/resume.
1210 * - Debug : used for debugging
1211 *
1212 * We save/restore all registers marked with 'Driver', with the following
1213 * exceptions:
1214 * - Registers out of use, including also registers marked with 'Debug'.
1215 * These have no effect on the driver's operation, so we don't save/restore
1216 * them to reduce the overhead.
1217 * - Registers that are fully setup by an initialization function called from
1218 * the resume path. For example many clock gating and RPS/RC6 registers.
1219 * - Registers that provide the right functionality with their reset defaults.
1220 *
1221 * TODO: Except for registers that based on the above 3 criteria can be safely
1222 * ignored, we save/restore all others, practically treating the HW context as
1223 * a black-box for the driver. Further investigation is needed to reduce the
1224 * saved/restored registers even further, by following the same 3 criteria.
1225 */
1226 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1227 {
1228 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1229 int i;
1230
1231 /* GAM 0x4000-0x4770 */
1232 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1233 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1234 s->arb_mode = I915_READ(ARB_MODE);
1235 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1236 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1237
1238 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1239 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
1240
1241 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1242 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
1243
1244 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1245 s->ecochk = I915_READ(GAM_ECOCHK);
1246 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1247 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1248
1249 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1250
1251 /* MBC 0x9024-0x91D0, 0x8500 */
1252 s->g3dctl = I915_READ(VLV_G3DCTL);
1253 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1254 s->mbctl = I915_READ(GEN6_MBCTL);
1255
1256 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1257 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1258 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1259 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1260 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1261 s->rstctl = I915_READ(GEN6_RSTCTL);
1262 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1263
1264 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1265 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1266 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1267 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1268 s->ecobus = I915_READ(ECOBUS);
1269 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1270 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1271 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1272 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1273 s->rcedata = I915_READ(VLV_RCEDATA);
1274 s->spare2gh = I915_READ(VLV_SPAREG2H);
1275
1276 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1277 s->gt_imr = I915_READ(GTIMR);
1278 s->gt_ier = I915_READ(GTIER);
1279 s->pm_imr = I915_READ(GEN6_PMIMR);
1280 s->pm_ier = I915_READ(GEN6_PMIER);
1281
1282 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1283 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
1284
1285 /* GT SA CZ domain, 0x100000-0x138124 */
1286 s->tilectl = I915_READ(TILECTL);
1287 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1288 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1289 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1290 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1291
1292 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1293 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1294 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1295 s->pcbr = I915_READ(VLV_PCBR);
1296 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1297
1298 /*
1299 * Not saving any of:
1300 * DFT, 0x9800-0x9EC0
1301 * SARB, 0xB000-0xB1FC
1302 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1303 * PCI CFG
1304 */
1305 }
1306
1307 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1308 {
1309 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1310 u32 val;
1311 int i;
1312
1313 /* GAM 0x4000-0x4770 */
1314 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1315 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1316 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1317 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1318 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1319
1320 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1321 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
1322
1323 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1324 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1325
1326 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1327 I915_WRITE(GAM_ECOCHK, s->ecochk);
1328 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1329 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1330
1331 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1332
1333 /* MBC 0x9024-0x91D0, 0x8500 */
1334 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1335 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1336 I915_WRITE(GEN6_MBCTL, s->mbctl);
1337
1338 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1339 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1340 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1341 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1342 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1343 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1344 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1345
1346 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1347 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1348 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1349 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1350 I915_WRITE(ECOBUS, s->ecobus);
1351 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1352 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
1353 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1354 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1355 I915_WRITE(VLV_RCEDATA, s->rcedata);
1356 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1357
1358 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1359 I915_WRITE(GTIMR, s->gt_imr);
1360 I915_WRITE(GTIER, s->gt_ier);
1361 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1362 I915_WRITE(GEN6_PMIER, s->pm_ier);
1363
1364 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1365 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
1366
1367 /* GT SA CZ domain, 0x100000-0x138124 */
1368 I915_WRITE(TILECTL, s->tilectl);
1369 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1370 /*
1371 * Preserve the GT allow wake and GFX force clock bits; they are not
1372 * to be restored, as they are used to control the s0ix suspend/resume
1373 * sequence by the caller.
1374 */
1375 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1376 val &= VLV_GTLC_ALLOWWAKEREQ;
1377 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1378 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1379
1380 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1381 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1382 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1383 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1384
1385 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1386
1387 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1388 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1389 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1390 I915_WRITE(VLV_PCBR, s->pcbr);
1391 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1392 }
1393
1394 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1395 {
1396 u32 val;
1397 int err;
1398
1399 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1400
1401 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1402 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1403 if (force_on)
1404 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1405 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1406
1407 if (!force_on)
1408 return 0;
1409
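/*
 * Poll the survivability register (for up to 20ms) until the GFX clock
 * status bit reports the clock as running.
 */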
1410 err = wait_for(COND, 20);
1411 if (err)
1412 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1413 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1414
1415 return err;
1416 #undef COND
1417 }
1418
1419 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1420 {
1421 u32 val;
1422 int err = 0;
1423
1424 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1425 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1426 if (allow)
1427 val |= VLV_GTLC_ALLOWWAKEREQ;
1428 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1429 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1430
1431 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1432 allow)
1433 err = wait_for(COND, 1);
1434 if (err)
1435 DRM_ERROR("timeout disabling GT waking\n");
1436 return err;
1437 #undef COND
1438 }
1439
1440 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1441 bool wait_for_on)
1442 {
1443 u32 mask;
1444 u32 val;
1445 int err;
1446
1447 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1448 val = wait_for_on ? mask : 0;
1449 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1450 if (COND)
1451 return 0;
1452
1453 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1454 wait_for_on ? "on" : "off",
1455 I915_READ(VLV_GTLC_PW_STATUS));
1456
1457 /*
1458 * RC6 transitioning can be delayed up to 2 msec (see
1459 * valleyview_enable_rps), use 3 msec for safety.
1460 */
1461 err = wait_for(COND, 3);
1462 if (err)
1463 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1464 wait_for_on ? "on" : "off");
1465
1466 return err;
1467 #undef COND
1468 }
1469
1470 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1471 {
1472 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1473 return;
1474
1475 DRM_ERROR("GT register access while GT waking disabled\n");
1476 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1477 }
1478
1479 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
1480 {
1481 u32 mask;
1482 int err;
1483
1484 /*
1485 * Bspec defines the following GT well on flags as debug only, so
1486 * don't treat them as hard failures.
1487 */
1488 (void)vlv_wait_for_gt_wells(dev_priv, false);
1489
1490 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1491 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1492
1493 vlv_check_no_gt_access(dev_priv);
1494
1495 err = vlv_force_gfx_clock(dev_priv, true);
1496 if (err)
1497 goto err1;
1498
1499 err = vlv_allow_gt_wake(dev_priv, false);
1500 if (err)
1501 goto err2;
1502
1503 if (!IS_CHERRYVIEW(dev_priv->dev))
1504 vlv_save_gunit_s0ix_state(dev_priv);
1505
1506 err = vlv_force_gfx_clock(dev_priv, false);
1507 if (err)
1508 goto err2;
1509
1510 return 0;
1511
1512 err2:
1513 /* For safety always re-enable waking and disable gfx clock forcing */
1514 vlv_allow_gt_wake(dev_priv, true);
1515 err1:
1516 vlv_force_gfx_clock(dev_priv, false);
1517
1518 return err;
1519 }
1520
1521 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1522 bool rpm_resume)
1523 {
1524 struct drm_device *dev = dev_priv->dev;
1525 int err;
1526 int ret;
1527
1528 /*
1529 * If any of the steps fail just try to continue, that's the best we
1530 * can do at this point. Return the first error code (which will also
1531 * leave RPM permanently disabled).
1532 */
1533 ret = vlv_force_gfx_clock(dev_priv, true);
1534
1535 if (!IS_CHERRYVIEW(dev_priv->dev))
1536 vlv_restore_gunit_s0ix_state(dev_priv);
1537
1538 err = vlv_allow_gt_wake(dev_priv, true);
1539 if (!ret)
1540 ret = err;
1541
1542 err = vlv_force_gfx_clock(dev_priv, false);
1543 if (!ret)
1544 ret = err;
1545
1546 vlv_check_no_gt_access(dev_priv);
1547
1548 if (rpm_resume) {
1549 intel_init_clock_gating(dev);
1550 i915_gem_restore_fences(dev);
1551 }
1552
1553 return ret;
1554 }
1555
1556 #ifndef __NetBSD__ /* XXX runtime pm */
1557 static int intel_runtime_suspend(struct device *device)
1558 {
1559 struct pci_dev *pdev = to_pci_dev(device);
1560 struct drm_device *dev = pci_get_drvdata(pdev);
1561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 int ret;
1563
1564 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1565 return -ENODEV;
1566
1567 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1568 return -ENODEV;
1569
1570 DRM_DEBUG_KMS("Suspending device\n");
1571
1572 /*
1573 * We could deadlock here in case another thread holding struct_mutex
1574 * calls RPM suspend concurrently, since the RPM suspend will wait
1575 * first for this RPM suspend to finish. In this case the concurrent
1576 * RPM resume will be followed by its RPM suspend counterpart. Still
1577 * for consistency return -EAGAIN, which will reschedule this suspend.
1578 */
1579 if (!mutex_trylock(&dev->struct_mutex)) {
1580 DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
1581 /*
1582 * Bump the expiration timestamp, otherwise the suspend won't
1583 * be rescheduled.
1584 */
1585 pm_runtime_mark_last_busy(device);
1586
1587 return -EAGAIN;
1588 }
1589 /*
1590 * We are safe here against re-faults, since the fault handler takes
1591 * an RPM reference.
1592 */
1593 i915_gem_release_all_mmaps(dev_priv);
1594 mutex_unlock(&dev->struct_mutex);
1595
1596 intel_guc_suspend(dev);
1597
1598 intel_suspend_gt_powersave(dev);
1599 intel_runtime_pm_disable_interrupts(dev_priv);
1600
1601 ret = intel_suspend_complete(dev_priv);
1602 if (ret) {
1603 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1604 intel_runtime_pm_enable_interrupts(dev_priv);
1605
1606 return ret;
1607 }
1608
1609 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1610 intel_uncore_forcewake_reset(dev, false);
1611 dev_priv->pm.suspended = true;
1612
1613 /*
1614 * FIXME: We really should find a document that references the arguments
1615 * used below!
1616 */
1617 if (IS_BROADWELL(dev)) {
1618 /*
1619 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1620 * being detected, and the call we do at intel_runtime_resume()
1621 * won't be able to restore them. Since PCI_D3hot matches the
1622 * actual specification and appears to be working, use it.
1623 */
1624 intel_opregion_notify_adapter(dev, PCI_D3hot);
1625 } else {
1626 /*
1627 * current versions of firmware which depend on this opregion
1628 * notification have repurposed the D1 definition to mean
1629 * "runtime suspended" vs. what you would normally expect (D3)
1630 * to distinguish it from notifications that might be sent via
1631 * the suspend path.
1632 */
1633 intel_opregion_notify_adapter(dev, PCI_D1);
1634 }
1635
1636 assert_forcewakes_inactive(dev_priv);
1637
1638 DRM_DEBUG_KMS("Device suspended\n");
1639 return 0;
1640 }
1641
1642 static int intel_runtime_resume(struct device *device)
1643 {
1644 struct pci_dev *pdev = to_pci_dev(device);
1645 struct drm_device *dev = pci_get_drvdata(pdev);
1646 struct drm_i915_private *dev_priv = dev->dev_private;
1647 int ret = 0;
1648
1649 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1650 return -ENODEV;
1651
1652 DRM_DEBUG_KMS("Resuming device\n");
1653
1654 intel_opregion_notify_adapter(dev, PCI_D0);
1655 dev_priv->pm.suspended = false;
1656
1657 intel_guc_resume(dev);
1658
1659 if (IS_GEN6(dev_priv))
1660 intel_init_pch_refclk(dev);
1661
1662 if (IS_BROXTON(dev))
1663 ret = bxt_resume_prepare(dev_priv);
1664 else if (IS_SKYLAKE(dev))
1665 ret = skl_resume_prepare(dev_priv);
1666 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1667 hsw_disable_pc8(dev_priv);
1668 else if (IS_VALLEYVIEW(dev_priv))
1669 ret = vlv_resume_prepare(dev_priv, true);
1670
1671 /*
1672 * No point in rolling back things in case of an error, as the best
1673 * we can do is to hope that things will still work (and disable RPM).
1674 */
1675 i915_gem_init_swizzling(dev);
1676 gen6_update_ring_freq(dev);
1677
1678 intel_runtime_pm_enable_interrupts(dev_priv);
1679
1680 /*
1681 * On VLV/CHV display interrupts are part of the display
1682 * power well, so hpd is reinitialized from there. For
1683 * everyone else do it here.
1684 */
1685 if (!IS_VALLEYVIEW(dev_priv))
1686 intel_hpd_init(dev_priv);
1687
1688 intel_enable_gt_powersave(dev);
1689
1690 if (ret)
1691 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1692 else
1693 DRM_DEBUG_KMS("Device resumed\n");
1694
1695 return ret;
1696 }
1697 #endif
1698
1699 /*
1700 * This function implements common functionality of runtime and system
1701 * suspend sequence.
1702 */
1703 static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1704 {
1705 int ret;
1706
1707 if (IS_BROXTON(dev_priv))
1708 ret = bxt_suspend_complete(dev_priv);
1709 else if (IS_SKYLAKE(dev_priv))
1710 ret = skl_suspend_complete(dev_priv);
1711 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1712 ret = hsw_suspend_complete(dev_priv);
1713 else if (IS_VALLEYVIEW(dev_priv))
1714 ret = vlv_suspend_complete(dev_priv);
1715 else
1716 ret = 0;
1717
1718 return ret;
1719 }
1720
1721 #ifndef __NetBSD__
1722
1723 static const struct dev_pm_ops i915_pm_ops = {
1724 /*
1725 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1726 * PMSG_RESUME]
1727 */
1728 .suspend = i915_pm_suspend,
1729 .suspend_late = i915_pm_suspend_late,
1730 .resume_early = i915_pm_resume_early,
1731 .resume = i915_pm_resume,
1732
1733 /*
1734 * S4 event handlers
1735 * @freeze, @freeze_late : called (1) before creating the
1736 * hibernation image [PMSG_FREEZE] and
1737 * (2) after rebooting, before restoring
1738 * the image [PMSG_QUIESCE]
1739 * @thaw, @thaw_early : called (1) after creating the hibernation
1740 * image, before writing it [PMSG_THAW]
1741 * and (2) after failing to create or
1742 * restore the image [PMSG_RECOVER]
1743 * @poweroff, @poweroff_late: called after writing the hibernation
1744 * image, before rebooting [PMSG_HIBERNATE]
1745 * @restore, @restore_early : called after rebooting and restoring the
1746 * hibernation image [PMSG_RESTORE]
1747 */
1748 .freeze = i915_pm_suspend,
1749 .freeze_late = i915_pm_suspend_late,
1750 .thaw_early = i915_pm_resume_early,
1751 .thaw = i915_pm_resume,
1752 .poweroff = i915_pm_suspend,
1753 .poweroff_late = i915_pm_poweroff_late,
1754 .restore_early = i915_pm_resume_early,
1755 .restore = i915_pm_resume,
1756
1757 /* S0ix (via runtime suspend) event handlers */
1758 .runtime_suspend = intel_runtime_suspend,
1759 .runtime_resume = intel_runtime_resume,
1760 };
1761
1762 static const struct vm_operations_struct i915_gem_vm_ops = {
1763 .fault = i915_gem_fault,
1764 .open = drm_gem_vm_open,
1765 .close = drm_gem_vm_close,
1766 };
1767
1768 static const struct file_operations i915_driver_fops = {
1769 .owner = THIS_MODULE,
1770 .open = drm_open,
1771 .release = drm_release,
1772 .unlocked_ioctl = drm_ioctl,
1773 .mmap = drm_gem_mmap,
1774 .poll = drm_poll,
1775 .read = drm_read,
1776 #ifdef CONFIG_COMPAT
1777 .compat_ioctl = i915_compat_ioctl,
1778 #endif
1779 .llseek = noop_llseek,
1780 };
1781
1782 #endif /* defined(__NetBSD__) */
1783
1784 static struct drm_driver driver = {
1785 /* Don't use MTRRs here; the Xserver or userspace app should
1786 * deal with them for Intel hardware.
1787 */
1788 .driver_features =
1789 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1790 DRIVER_RENDER | DRIVER_MODESET,
1791 .load = i915_driver_load,
1792 .unload = i915_driver_unload,
1793 .open = i915_driver_open,
1794 .lastclose = i915_driver_lastclose,
1795 .preclose = i915_driver_preclose,
1796 .postclose = i915_driver_postclose,
1797 .set_busid = drm_pci_set_busid,
1798 #ifdef __NetBSD__
1799 .request_irq = drm_pci_request_irq,
1800 .free_irq = drm_pci_free_irq,
1801 #endif
1802
1803 #if defined(CONFIG_DEBUG_FS)
1804 .debugfs_init = i915_debugfs_init,
1805 .debugfs_cleanup = i915_debugfs_cleanup,
1806 #endif
1807 .gem_free_object = i915_gem_free_object,
1808 #ifdef __NetBSD__
1809 /* XXX Not clear the `or legacy' part is important here. */
1810 .mmap_object = &drm_gem_mmap_object,
1811 .gem_uvm_ops = &i915_gem_uvm_ops,
1812 #else
1813 .gem_vm_ops = &i915_gem_vm_ops,
1814 #endif
1815
1816 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1817 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1818 .gem_prime_export = i915_gem_prime_export,
1819 .gem_prime_import = i915_gem_prime_import,
1820
1821 .dumb_create = i915_gem_dumb_create,
1822 .dumb_map_offset = i915_gem_mmap_gtt,
1823 .dumb_destroy = drm_gem_dumb_destroy,
1824 .ioctls = i915_ioctls,
1825 #ifdef __NetBSD__
1826 .fops = NULL,
1827 #else
1828 .fops = &i915_driver_fops,
1829 #endif
1830 .name = DRIVER_NAME,
1831 .desc = DRIVER_DESC,
1832 .date = DRIVER_DATE,
1833 .major = DRIVER_MAJOR,
1834 .minor = DRIVER_MINOR,
1835 .patchlevel = DRIVER_PATCHLEVEL,
1836 };
1837
1838 #ifndef __NetBSD__
1839 static struct pci_driver i915_pci_driver = {
1840 .name = DRIVER_NAME,
1841 .id_table = pciidlist,
1842 .probe = i915_pci_probe,
1843 .remove = i915_pci_remove,
1844 .driver.pm = &i915_pm_ops,
1845 };
1846 #endif
1847
1848 #ifndef __NetBSD__
1849 static int __init i915_init(void)
1850 {
1851 driver.num_ioctls = i915_max_ioctl;
1852
1853 /*
1854 * Enable KMS by default, unless explicitly overridden by
1855 * either the i915.modeset parameter or by the
1856 * vga_text_mode_force boot option.
1857 */
1858
1859 if (i915.modeset == 0)
1860 driver.driver_features &= ~DRIVER_MODESET;
1861
1862 #ifdef CONFIG_VGA_CONSOLE
1863 if (vgacon_text_force() && i915.modeset == -1)
1864 driver.driver_features &= ~DRIVER_MODESET;
1865 #endif
1866
1867 if (!(driver.driver_features & DRIVER_MODESET)) {
1868 /* Silently fail loading to not upset userspace. */
1869 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1870 return 0;
1871 }
1872
1873 if (i915.nuclear_pageflip)
1874 driver.driver_features |= DRIVER_ATOMIC;
1875
1876 return drm_pci_init(&driver, &i915_pci_driver);
1877 }
1878
1879 static void __exit i915_exit(void)
1880 {
1881 if (!(driver.driver_features & DRIVER_MODESET))
1882 return; /* Never loaded a driver. */
1883
1884 drm_pci_exit(&driver, &i915_pci_driver);
1885 }
1886
1887 module_init(i915_init);
1888 module_exit(i915_exit);
1889 #endif
1890
1891 MODULE_AUTHOR("Tungsten Graphics, Inc.");
1892 MODULE_AUTHOR("Intel Corporation");
1893
1894 MODULE_DESCRIPTION(DRIVER_DESC);
1895 MODULE_LICENSE("GPL and additional rights");
1896