/* Home | History | Annotate | Line # | Download | only in display */
      1 /*	$NetBSD: intel_display.c,v 1.12 2021/12/19 12:37:17 riastradh Exp $	*/
      2 
      3 /*
 * Copyright © 2006-2007 Intel Corporation
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     23  * DEALINGS IN THE SOFTWARE.
     24  *
     25  * Authors:
     26  *	Eric Anholt <eric (at) anholt.net>
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 __KERNEL_RCSID(0, "$NetBSD: intel_display.c,v 1.12 2021/12/19 12:37:17 riastradh Exp $");
     31 
     32 #include "intel_display.h"	/* for pipe_drmhack */
     33 
     34 #include <linux/i2c.h>
     35 #include <linux/input.h>
     36 #include <linux/intel-iommu.h>
     37 #include <linux/kernel.h>
     38 #include <linux/module.h>
     39 #include <linux/dma-resv.h>
     40 #include <linux/slab.h>
     41 
     42 #include <drm/drm_atomic.h>
     43 #include <drm/drm_atomic_helper.h>
     44 #include <drm/drm_atomic_uapi.h>
     45 #include <drm/drm_dp_helper.h>
     46 #include <drm/drm_edid.h>
     47 #include <drm/drm_fourcc.h>
     48 #include <drm/drm_plane_helper.h>
     49 #include <drm/drm_probe_helper.h>
     50 #include <drm/drm_rect.h>
     51 #include <drm/i915_drm.h>
     52 
     53 #include "display/intel_crt.h"
     54 #include "display/intel_ddi.h"
     55 #include "display/intel_dp.h"
     56 #include "display/intel_dp_mst.h"
     57 #include "display/intel_dsi.h"
     58 #include "display/intel_dvo.h"
     59 #include "display/intel_gmbus.h"
     60 #include "display/intel_hdmi.h"
     61 #include "display/intel_lvds.h"
     62 #include "display/intel_sdvo.h"
     63 #include "display/intel_tv.h"
     64 #include "display/intel_vdsc.h"
     65 
     66 #include "gt/intel_rps.h"
     67 
     68 #include "i915_drv.h"
     69 #include "i915_trace.h"
     70 #include "intel_acpi.h"
     71 #include "intel_atomic.h"
     72 #include "intel_atomic_plane.h"
     73 #include "intel_bw.h"
     74 #include "intel_cdclk.h"
     75 #include "intel_color.h"
     76 #include "intel_display_types.h"
     77 #include "intel_dp_link_training.h"
     78 #include "intel_fbc.h"
     79 #include "intel_fbdev.h"
     80 #include "intel_fifo_underrun.h"
     81 #include "intel_frontbuffer.h"
     82 #include "intel_hdcp.h"
     83 #include "intel_hotplug.h"
     84 #include "intel_overlay.h"
     85 #include "intel_pipe_crc.h"
     86 #include "intel_pm.h"
     87 #include "intel_psr.h"
     88 #include "intel_quirks.h"
     89 #include "intel_sideband.h"
     90 #include "intel_sprite.h"
     91 #include "intel_tc.h"
     92 #include "intel_vga.h"
     93 
     94 #include <linux/nbsd-namespace.h>
     95 
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Tiling modifiers for the primary plane formats above; the list is
 * terminated by DRM_FORMAT_MOD_INVALID. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes take only linear buffers; DRM_FORMAT_MOD_INVALID
 * terminates the list. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
    155 
    156 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
    157 				struct intel_crtc_state *pipe_config);
    158 static void ilk_pch_clock_get(struct intel_crtc *crtc,
    159 			      struct intel_crtc_state *pipe_config);
    160 
    161 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
    162 				  struct drm_i915_gem_object *obj,
    163 				  struct drm_mode_fb_cmd2 *mode_cmd);
    164 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
    165 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
    166 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
    167 					 const struct intel_link_m_n *m_n,
    168 					 const struct intel_link_m_n *m2_n2);
    169 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
    170 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
    171 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
    172 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
    173 static void vlv_prepare_pll(struct intel_crtc *crtc,
    174 			    const struct intel_crtc_state *pipe_config);
    175 static void chv_prepare_pll(struct intel_crtc *crtc,
    176 			    const struct intel_crtc_state *pipe_config);
    177 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
    178 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
    179 static void intel_modeset_setup_hw_state(struct drm_device *dev,
    180 					 struct drm_modeset_acquire_ctx *ctx);
    181 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
    182 
/*
 * Per-platform DPLL divisor limits.  Each member gives the inclusive
 * [min, max] range allowed for the corresponding term of the DPLL clock
 * equation; the *_find_best_dpll() searchers reject candidates outside
 * these ranges (see intel_PLL_is_valid()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/* p2 is chosen by comparing the target dot clock against
	 * dot_limit (or by LVDS channel mode); see i9xx_select_p2_div(). */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
    193 
    194 /* returns HPLL frequency in kHz */
    195 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
    196 {
    197 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
    198 
    199 	/* Obtain SKU information */
    200 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
    201 		CCK_FUSE_HPLL_FREQ_MASK;
    202 
    203 	return vco_freq[hpll_freq] * 1000;
    204 }
    205 
    206 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
    207 		      const char *name, u32 reg, int ref_freq)
    208 {
    209 	u32 val;
    210 	int divider;
    211 
    212 	val = vlv_cck_read(dev_priv, reg);
    213 	divider = val & CCK_FREQUENCY_VALUES;
    214 
    215 	WARN((val & CCK_FREQUENCY_STATUS) !=
    216 	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
    217 	     "%s change in progress\n", name);
    218 
    219 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
    220 }
    221 
    222 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
    223 			   const char *name, u32 reg)
    224 {
    225 	int hpll;
    226 
    227 	vlv_cck_get(dev_priv);
    228 
    229 	if (dev_priv->hpll_freq == 0)
    230 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
    231 
    232 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
    233 
    234 	vlv_cck_put(dev_priv);
    235 
    236 	return hpll;
    237 }
    238 
    239 static void intel_update_czclk(struct drm_i915_private *dev_priv)
    240 {
    241 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
    242 		return;
    243 
    244 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
    245 						      CCK_CZ_CLOCK_CONTROL);
    246 
    247 	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
    248 }
    249 
    250 static inline u32 /* units of 100MHz */
    251 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
    252 		    const struct intel_crtc_state *pipe_config)
    253 {
    254 	if (HAS_DDI(dev_priv))
    255 		return pipe_config->port_clock; /* SPLL */
    256 	else
    257 		return dev_priv->fdi_pll_freq;
    258 }
    259 
/*
 * DPLL divisor limits for gen2 (i8xx) and gen3 (i9xx) platforms, one
 * table per output type.  See struct intel_limit for field meanings.
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Same ranges as the DAC table, but DVO keeps p2 = 4 above the dot limit. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
    324 
    325 
/* DPLL divisor limits for G4x and Pineview platforms, per output type. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit of 0 means the fixed p2 value is always used */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
    409 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
    480 
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on CHV (see chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
    524 
    525 /* WA Display #0827: Gen9:all */
    526 static void
    527 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
    528 {
    529 	if (enable)
    530 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    531 			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
    532 			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
    533 	else
    534 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    535 			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
    536 			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
    537 }
    538 
    539 /* Wa_2006604312:icl */
    540 static void
    541 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
    542 		       bool enable)
    543 {
    544 	if (enable)
    545 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    546 			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
    547 	else
    548 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    549 			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
    550 }
    551 
/* True iff the atomic core flagged this CRTC state as needing a full
 * modeset (as opposed to a fastset/plane-only update). */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
    557 
    558 bool
    559 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
    560 {
    561 	return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
    562 		crtc_state->sync_mode_slaves_mask);
    563 }
    564 
/* True iff this CRTC is a port sync slave (driven by a master transcoder). */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
    570 
    571 /*
    572  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
    573  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
    574  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
    575  * The helpers' return value is the rate of the clock that is fed to the
    576  * display engine's pipe which can be the above fast dot clock rate or a
    577  * divided-down version of it.
    578  */
    579 /* m1 is reserved as 0 in Pineview, n is a ring counter */
    580 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
    581 {
    582 	clock->m = clock->m2 + 2;
    583 	clock->p = clock->p1 * clock->p2;
    584 	if (WARN_ON(clock->n == 0 || clock->p == 0))
    585 		return 0;
    586 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
    587 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    588 
    589 	return clock->dot;
    590 }
    591 
    592 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
    593 {
    594 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
    595 }
    596 
    597 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
    598 {
    599 	clock->m = i9xx_dpll_compute_m(clock);
    600 	clock->p = clock->p1 * clock->p2;
    601 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
    602 		return 0;
    603 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
    604 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    605 
    606 	return clock->dot;
    607 }
    608 
    609 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
    610 {
    611 	clock->m = clock->m1 * clock->m2;
    612 	clock->p = clock->p1 * clock->p2;
    613 	if (WARN_ON(clock->n == 0 || clock->p == 0))
    614 		return 0;
    615 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
    616 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    617 
    618 	return clock->dot / 5;
    619 }
    620 
    621 int chv_calc_dpll_params(int refclk, struct dpll *clock)
    622 {
    623 	clock->m = clock->m1 * clock->m2;
    624 	clock->p = clock->p1 * clock->p2;
    625 	if (WARN_ON(clock->n == 0 || clock->p == 0))
    626 		return 0;
    627 	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
    628 					   clock->n << 22);
    629 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    630 
    631 	return clock->dot / 5;
    632 }
    633 
/* Reject the candidate divisor set; the debug message is compiled out. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	/* Each raw divisor must sit inside its platform range. */
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is a hardware requirement except on PNV/VLV/CHV/BXT. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT don't constrain the combined m and p values. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
    676 
    677 static int
    678 i9xx_select_p2_div(const struct intel_limit *limit,
    679 		   const struct intel_crtc_state *crtc_state,
    680 		   int target)
    681 {
    682 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
    683 
    684 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
    685 		/*
    686 		 * For LVDS just rely on its current settings for dual-channel.
    687 		 * We haven't figured out how to reliably set up different
    688 		 * single/dual channel state, if we even can.
    689 		 */
    690 		if (intel_is_dual_link_lvds(dev_priv))
    691 			return limit->p2.p2_fast;
    692 		else
    693 			return limit->p2.p2_slow;
    694 	} else {
    695 		if (target < limit->p2.dot_limit)
    696 			return limit->p2.p2_slow;
    697 		else
    698 			return limit->p2.p2_fast;
    699 	}
    700 }
    701 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	/* Best error seen so far; starting at target means any valid
	 * candidate with error < target is an improvement. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the divisor space, keeping the combination
	 * whose dot clock lands closest to the target. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay strictly greater than m2 here, so
			 * larger m2 values cannot yield a valid set. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking needs the same
					 * post divider as the base mode. */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff some valid combination improved on the initial error. */
	return (err != target);
}
    759 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	/* Best error seen so far (see i9xx_find_best_dpll). */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Same exhaustive walk as i9xx_find_best_dpll, but without the
	 * m1 > m2 restriction (m1 is unused on Pineview) and using the
	 * Pineview clock equation. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking needs the same
					 * post divider as the base mode. */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff some valid combination improved on the initial error. */
	return (err != target);
}
    815 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					/* Accept only candidates within the
					 * ~0.585% error budget; shrinking
					 * max_n keeps later iterations at
					 * the same-or-smaller n. */
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
    874 
    875 /*
    876  * Check if the calculated PLL configuration is more optimal compared to the
    877  * best configuration and error found so far. Return the calculated error.
    878  */
    879 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
    880 			       const struct dpll *calculated_clock,
    881 			       const struct dpll *best_clock,
    882 			       unsigned int best_error_ppm,
    883 			       unsigned int *error_ppm)
    884 {
    885 	/*
    886 	 * For CHV ignore the error and consider only the P value.
    887 	 * Prefer a bigger P value based on HW requirements.
    888 	 */
    889 	if (IS_CHERRYVIEW(to_i915(dev))) {
    890 		*error_ppm = 0;
    891 
    892 		return calculated_clock->p > best_clock->p;
    893 	}
    894 
    895 	if (WARN_ON_ONCE(!target_freq))
    896 		return false;
    897 
    898 	*error_ppm = div_u64(1000000ULL *
    899 				abs(target_freq - calculated_clock->dot),
    900 			     target_freq);
    901 	/*
    902 	 * Prefer a better P value over a better (smaller) error if the error
    903 	 * is small. Ensure this preference for future configurations too by
    904 	 * setting the error to 0.
    905 	 */
    906 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
    907 		*error_ppm = 0;
    908 
    909 		return true;
    910 	}
    911 
    912 	return *error_ppm + 10 < best_error_ppm;
    913 }
    914 
    915 /*
    916  * Returns a set of divisors for the desired target clock with the given
    917  * refclk, or FALSE.  The returned values represent the clock equation:
    918  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
    919  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	/* NOTE(review): match_clock is unused by this variant. */
	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			/* p2 steps by 2 while above 10, then by 1 */
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm = 0; /*XXXGCC*/

					/*
					 * Solve for the m2 that hits the
					 * target given the other divisors.
					 */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
    974 
    975 /*
    976  * Returns a set of divisors for the desired target clock with the given
    977  * refclk, or FALSE.  The returned values represent the clock equation:
    978  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
    979  */
    980 static bool
    981 chv_find_best_dpll(const struct intel_limit *limit,
    982 		   struct intel_crtc_state *crtc_state,
    983 		   int target, int refclk, struct dpll *match_clock,
    984 		   struct dpll *best_clock)
    985 {
    986 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
    987 	struct drm_device *dev = crtc->base.dev;
    988 	unsigned int best_error_ppm;
    989 	struct dpll clock;
    990 	u64 m2;
    991 	int found = false;
    992 
    993 	memset(best_clock, 0, sizeof(*best_clock));
    994 	best_error_ppm = 1000000;
    995 
    996 	/*
    997 	 * Based on hardware doc, the n always set to 1, and m1 always
    998 	 * set to 2.  If requires to support 200Mhz refclk, we need to
    999 	 * revisit this because n may not 1 anymore.
   1000 	 */
   1001 	clock.n = 1, clock.m1 = 2;
   1002 	target *= 5;	/* fast clock */
   1003 
   1004 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
   1005 		for (clock.p2 = limit->p2.p2_fast;
   1006 				clock.p2 >= limit->p2.p2_slow;
   1007 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
   1008 			unsigned int error_ppm = 0; /*XXXGCC*/
   1009 
   1010 			clock.p = clock.p1 * clock.p2;
   1011 
   1012 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
   1013 						   refclk * clock.m1);
   1014 
   1015 			if (m2 > INT_MAX/clock.m1)
   1016 				continue;
   1017 
   1018 			clock.m2 = m2;
   1019 
   1020 			chv_calc_dpll_params(refclk, &clock);
   1021 
   1022 			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
   1023 				continue;
   1024 
   1025 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
   1026 						best_error_ppm, &error_ppm))
   1027 				continue;
   1028 
   1029 			*best_clock = clock;
   1030 			best_error_ppm = error_ppm;
   1031 			found = true;
   1032 		}
   1033 	}
   1034 
   1035 	return found;
   1036 }
   1037 
   1038 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
   1039 			struct dpll *best_clock)
   1040 {
   1041 	int refclk = 100000;
   1042 	const struct intel_limit *limit = &intel_limits_bxt;
   1043 
   1044 	return chv_find_best_dpll(limit, crtc_state,
   1045 				  crtc_state->port_clock, refclk,
   1046 				  NULL, best_clock);
   1047 }
   1048 
   1049 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
   1050 				    enum pipe pipe)
   1051 {
   1052 	i915_reg_t reg = PIPEDSL(pipe);
   1053 	u32 line1, line2;
   1054 	u32 line_mask;
   1055 
   1056 	if (IS_GEN(dev_priv, 2))
   1057 		line_mask = DSL_LINEMASK_GEN2;
   1058 	else
   1059 		line_mask = DSL_LINEMASK_GEN3;
   1060 
   1061 	line1 = I915_READ(reg) & line_mask;
   1062 	msleep(5);
   1063 	line2 = I915_READ(reg) & line_mask;
   1064 
   1065 	return line1 != line2;
   1066 }
   1067 
   1068 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
   1069 {
   1070 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1071 	enum pipe pipe = crtc->pipe;
   1072 
   1073 	/* Wait for the display line to settle/start moving */
   1074 	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
   1075 		DRM_ERROR("pipe %c scanline %s wait timed out\n",
   1076 			  pipe_name(pipe), onoff(state));
   1077 }
   1078 
/* Wait (up to 100ms) for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
   1083 
/* Wait (up to 100ms) for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
   1088 
   1089 static void
   1090 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
   1091 {
   1092 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   1093 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1094 
   1095 	if (INTEL_GEN(dev_priv) >= 4) {
   1096 		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
   1097 		i915_reg_t reg = PIPECONF(cpu_transcoder);
   1098 
   1099 		/* Wait for the Pipe State to go off */
   1100 		if (intel_de_wait_for_clear(dev_priv, reg,
   1101 					    I965_PIPECONF_ACTIVE, 100))
   1102 			WARN(1, "pipe_off wait timed out\n");
   1103 	} else {
   1104 		intel_wait_for_pipe_scanline_stopped(crtc);
   1105 	}
   1106 }
   1107 
   1108 /* Only for pre-ILK configs */
   1109 void assert_pll(struct drm_i915_private *dev_priv,
   1110 		enum pipe pipe, bool state)
   1111 {
   1112 	u32 val;
   1113 	bool cur_state;
   1114 
   1115 	val = I915_READ(DPLL(pipe));
   1116 	cur_state = !!(val & DPLL_VCO_ENABLE);
   1117 	I915_STATE_WARN(cur_state != state,
   1118 	     "PLL state assertion failure (expected %s, current %s)\n",
   1119 			onoff(state), onoff(cur_state));
   1120 }
   1121 
   1122 /* XXX: the dsi pll is shared between MIPI DSI ports */
   1123 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
   1124 {
   1125 	u32 val;
   1126 	bool cur_state;
   1127 
   1128 	vlv_cck_get(dev_priv);
   1129 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
   1130 	vlv_cck_put(dev_priv);
   1131 
   1132 	cur_state = val & DSI_PLL_VCO_EN;
   1133 	I915_STATE_WARN(cur_state != state,
   1134 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
   1135 			onoff(state), onoff(cur_state));
   1136 }
   1137 
   1138 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
   1139 			  enum pipe pipe, bool state)
   1140 {
   1141 	bool cur_state;
   1142 
   1143 	if (HAS_DDI(dev_priv)) {
   1144 		/*
   1145 		 * DDI does not have a specific FDI_TX register.
   1146 		 *
   1147 		 * FDI is never fed from EDP transcoder
   1148 		 * so pipe->transcoder cast is fine here.
   1149 		 */
   1150 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
   1151 		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
   1152 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
   1153 	} else {
   1154 		u32 val = I915_READ(FDI_TX_CTL(pipe));
   1155 		cur_state = !!(val & FDI_TX_ENABLE);
   1156 	}
   1157 	I915_STATE_WARN(cur_state != state,
   1158 	     "FDI TX state assertion failure (expected %s, current %s)\n",
   1159 			onoff(state), onoff(cur_state));
   1160 }
   1161 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
   1162 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
   1163 
   1164 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
   1165 			  enum pipe pipe, bool state)
   1166 {
   1167 	u32 val;
   1168 	bool cur_state;
   1169 
   1170 	val = I915_READ(FDI_RX_CTL(pipe));
   1171 	cur_state = !!(val & FDI_RX_ENABLE);
   1172 	I915_STATE_WARN(cur_state != state,
   1173 	     "FDI RX state assertion failure (expected %s, current %s)\n",
   1174 			onoff(state), onoff(cur_state));
   1175 }
   1176 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
   1177 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
   1178 
   1179 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
   1180 				      enum pipe pipe)
   1181 {
   1182 	u32 val;
   1183 
   1184 	/* ILK FDI PLL is always enabled */
   1185 	if (IS_GEN(dev_priv, 5))
   1186 		return;
   1187 
   1188 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
   1189 	if (HAS_DDI(dev_priv))
   1190 		return;
   1191 
   1192 	val = I915_READ(FDI_TX_CTL(pipe));
   1193 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
   1194 }
   1195 
   1196 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
   1197 		       enum pipe pipe, bool state)
   1198 {
   1199 	u32 val;
   1200 	bool cur_state;
   1201 
   1202 	val = I915_READ(FDI_RX_CTL(pipe));
   1203 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
   1204 	I915_STATE_WARN(cur_state != state,
   1205 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
   1206 			onoff(state), onoff(cur_state));
   1207 }
   1208 
/*
 * Warn if the panel power sequencer registers for the panel driven by
 * @pipe are write-locked while the panel is powered on.  Figures out
 * which pipe the panel is attached to from the platform-specific port
 * select bits before comparing against @pipe.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are handled elsewhere; this path must not run there */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* PCH: panel pipe is derived from the port-select field */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* pre-PCH mobile: only LVDS can drive the panel */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Regs count as unlocked if the panel is off or the unlock key is set */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
   1265 
   1266 void assert_pipe(struct drm_i915_private *dev_priv,
   1267 		 enum transcoder cpu_transcoder, bool state)
   1268 {
   1269 	bool cur_state;
   1270 	enum intel_display_power_domain power_domain;
   1271 	intel_wakeref_t wakeref;
   1272 
   1273 	/* we keep both pipes enabled on 830 */
   1274 	if (IS_I830(dev_priv))
   1275 		state = true;
   1276 
   1277 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
   1278 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
   1279 	if (wakeref) {
   1280 		u32 val = I915_READ(PIPECONF(cpu_transcoder));
   1281 		cur_state = !!(val & PIPECONF_ENABLE);
   1282 
   1283 		intel_display_power_put(dev_priv, power_domain, wakeref);
   1284 	} else {
   1285 		cur_state = false;
   1286 	}
   1287 
   1288 	I915_STATE_WARN(cur_state != state,
   1289 			"transcoder %s assertion failure (expected %s, current %s)\n",
   1290 			transcoder_name(cpu_transcoder),
   1291 			onoff(state), onoff(cur_state));
   1292 }
   1293 
   1294 static void assert_plane(struct intel_plane *plane, bool state)
   1295 {
   1296 	enum pipe pipe;
   1297 	bool cur_state;
   1298 
   1299 	cur_state = plane->get_hw_state(plane, &pipe);
   1300 
   1301 	I915_STATE_WARN(cur_state != state,
   1302 			"%s assertion failure (expected %s, current %s)\n",
   1303 			plane->base.name, onoff(state), onoff(cur_state));
   1304 }
   1305 
   1306 #define assert_plane_enabled(p) assert_plane(p, true)
   1307 #define assert_plane_disabled(p) assert_plane(p, false)
   1308 
/* Warn unless every plane attached to @crtc reports itself disabled. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
   1317 
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * NOTE(review): drm_crtc_vblank_get() returning 0 presumably means
	 * vblank interrupts are still usable (i.e. not marked disabled) —
	 * confirm against the drm core.  In that warned case the reference
	 * just taken is dropped again to keep the refcount balanced.
	 */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
   1323 
   1324 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
   1325 				    enum pipe pipe)
   1326 {
   1327 	u32 val;
   1328 	bool enabled;
   1329 
   1330 	val = I915_READ(PCH_TRANSCONF(pipe));
   1331 	enabled = !!(val & TRANS_ENABLE);
   1332 	I915_STATE_WARN(enabled,
   1333 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
   1334 	     pipe_name(pipe));
   1335 }
   1336 
   1337 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
   1338 				   enum pipe pipe, enum port port,
   1339 				   i915_reg_t dp_reg)
   1340 {
   1341 	enum pipe port_pipe;
   1342 	bool state;
   1343 
   1344 	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
   1345 
   1346 	I915_STATE_WARN(state && port_pipe == pipe,
   1347 			"PCH DP %c enabled on transcoder %c, should be disabled\n",
   1348 			port_name(port), pipe_name(pipe));
   1349 
   1350 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
   1351 			"IBX PCH DP %c still using transcoder B\n",
   1352 			port_name(port));
   1353 }
   1354 
   1355 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
   1356 				     enum pipe pipe, enum port port,
   1357 				     i915_reg_t hdmi_reg)
   1358 {
   1359 	enum pipe port_pipe;
   1360 	bool state;
   1361 
   1362 	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
   1363 
   1364 	I915_STATE_WARN(state && port_pipe == pipe,
   1365 			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
   1366 			port_name(port), pipe_name(pipe));
   1367 
   1368 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
   1369 			"IBX PCH HDMI %c still using transcoder B\n",
   1370 			port_name(port));
   1371 }
   1372 
/* Warn unless every PCH port (DP, VGA, LVDS, HDMI) is off for @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
   1397 
/* Program and enable the VLV DPLL, then wait for it to report lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* give the PLL time to spin up before polling for lock */
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
   1411 
/*
 * Enable the DPLL for @crtc on VLV.  The pipe must already be disabled
 * and the panel power sequencer unlocked, since the PLL registers are
 * protected by the panel lock.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only spin the VCO up when the state actually wants it enabled */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
   1429 
   1430 
/*
 * Enable the CHV DPLL: first re-enable the 10bit display clock via the
 * DPIO sideband, then enable the PLL and wait for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
   1460 
/*
 * Enable the DPLL for @crtc on CHV.  Requires the pipe disabled and
 * the panel unlocked; pipes B/C need a chicken-bit workaround to write
 * their DPLL_MD value.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only spin the VCO up when the state actually wants it enabled */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* remember what we wrote, since DPLL_MD can't be read back */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
   1497 
   1498 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
   1499 {
   1500 	if (IS_I830(dev_priv))
   1501 		return false;
   1502 
   1503 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
   1504 }
   1505 
/*
 * Enable the DPLL for @crtc on gen2-gen4 style hardware, following the
 * documented write/wait sequence (VGA-mode quirk, stabilization delay,
 * triple rewrite for warmup).
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
   1551 
/* Disable the gen2-gen4 DPLL, leaving only VGA-mode-disable set. */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
   1568 
/*
 * Disable the VLV DPLL: leave the reference clock (and, for pipes B/C,
 * the CRI clock needed by the DP/HDMI PHY) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
   1584 
/*
 * Disable the CHV DPLL, then shut off the 10bit display clock via the
 * DPIO sideband (the reverse of _chv_enable_pll's bring-up).
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
   1610 
/*
 * Poll the per-port ready bits until they match @expected_mask, warning
 * on timeout.  Which register and bit field to watch depends on the
 * digital port being brought up.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
   1642 
/*
 * Enable the PCH transcoder for @crtc_state on ILK-style PCHs.  The
 * shared DPLL and both FDI directions must already be up; BPC and
 * interlace settings are mirrored from the CPU pipe's PIPECONF.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* mirror the CPU pipe's interlace mode */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
   1708 
/*
 * Enable the single LPT PCH transcoder fed by @cpu_transcoder.  On LPT
 * there is only one PCH transcoder and its RX side is hardwired to
 * "pipe A" register offsets.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* mirror the CPU pipe's interlace mode */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
   1740 
/*
 * Disable the per-pipe PCH transcoder on ILK-style PCHs and wait for it
 * to report the disabled state. FDI and the PCH ports must already be
 * off, since they depend on the transcoder.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
   1770 
/*
 * Disable the single LPT PCH transcoder, wait for it to report the
 * disabled state, and undo the timing-override workaround that
 * lpt_enable_pch_transcoder() applied.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
   1788 
   1789 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
   1790 {
   1791 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1792 
   1793 	if (HAS_PCH_LPT(dev_priv))
   1794 		return PIPE_A;
   1795 	else
   1796 		return crtc->pipe;
   1797 }
   1798 
   1799 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
   1800 {
   1801 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1802 
   1803 	/*
   1804 	 * On i965gm the hardware frame counter reads
   1805 	 * zero when the TV encoder is enabled :(
   1806 	 */
   1807 	if (IS_I965GM(dev_priv) &&
   1808 	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
   1809 		return 0;
   1810 
   1811 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
   1812 		return 0xffffffff; /* full 32 bit counter */
   1813 	else if (INTEL_GEN(dev_priv) >= 3)
   1814 		return 0xffffff; /* only 24 bits of frame count */
   1815 	else
   1816 		return 0; /* Gen2 doesn't have a hardware frame counter */
   1817 }
   1818 
/*
 * Enable vblank processing for the CRTC: program the max hardware frame
 * counter for this configuration before turning vblanks on, so the DRM
 * core knows how/whether to use the hardware counter.
 */
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
   1828 
/* Disable vblank processing for the CRTC and assert it ended up off. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
   1836 
/*
 * Enable the CPU pipe/transcoder for the given CRTC state: sanity-check
 * the PLL/FDI preconditions, set PIPECONF_ENABLE, and wait for the
 * scanline counter to start moving when there is no hardware frame
 * counter to take over timestamping.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
   1894 
/*
 * Disable the CPU pipe/transcoder for the given (old) CRTC state and,
 * unless the pipe must stay on (i830), wait for it to actually turn off.
 * All planes must already be disabled.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Already disabled; nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for the off transition if we actually disabled it. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
   1934 
/* Size of one tile in bytes: gen2 uses 2k tiles, everything newer 4k. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
   1939 
   1940 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
   1941 {
   1942 	if (!is_ccs_modifier(fb->modifier))
   1943 		return false;
   1944 
   1945 	return plane >= fb->format->num_planes / 2;
   1946 }
   1947 
   1948 static bool is_gen12_ccs_modifier(u64 modifier)
   1949 {
   1950 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
   1951 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
   1952 
   1953 }
   1954 
   1955 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
   1956 {
   1957 	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
   1958 }
   1959 
   1960 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
   1961 {
   1962 	if (is_ccs_modifier(fb->modifier))
   1963 		return is_ccs_plane(fb, plane);
   1964 
   1965 	return plane == 1;
   1966 }
   1967 
   1968 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
   1969 {
   1970 	WARN_ON(!is_ccs_modifier(fb->modifier) ||
   1971 		(main_plane && main_plane >= fb->format->num_planes / 2));
   1972 
   1973 	return fb->format->num_planes / 2 + main_plane;
   1974 }
   1975 
   1976 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
   1977 {
   1978 	WARN_ON(!is_ccs_modifier(fb->modifier) ||
   1979 		ccs_plane < fb->format->num_planes / 2);
   1980 
   1981 	return ccs_plane - fb->format->num_planes / 2;
   1982 }
   1983 
   1984 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
   1985 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
   1986 {
   1987 	if (is_ccs_modifier(fb->modifier))
   1988 		return main_to_ccs_plane(fb, main_plane);
   1989 
   1990 	return 1;
   1991 }
   1992 
   1993 bool
   1994 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
   1995 				    uint64_t modifier)
   1996 {
   1997 	return info->is_yuv &&
   1998 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
   1999 }
   2000 
   2001 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
   2002 				   int color_plane)
   2003 {
   2004 	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
   2005 	       color_plane == 1;
   2006 }
   2007 
   2008 static unsigned int
   2009 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
   2010 {
   2011 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
   2012 	unsigned int cpp = fb->format->cpp[color_plane];
   2013 
   2014 	switch (fb->modifier) {
   2015 	case DRM_FORMAT_MOD_LINEAR:
   2016 		return intel_tile_size(dev_priv);
   2017 	case I915_FORMAT_MOD_X_TILED:
   2018 		if (IS_GEN(dev_priv, 2))
   2019 			return 128;
   2020 		else
   2021 			return 512;
   2022 	case I915_FORMAT_MOD_Y_TILED_CCS:
   2023 		if (is_ccs_plane(fb, color_plane))
   2024 			return 128;
   2025 		/* fall through */
   2026 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   2027 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   2028 		if (is_ccs_plane(fb, color_plane))
   2029 			return 64;
   2030 		/* fall through */
   2031 	case I915_FORMAT_MOD_Y_TILED:
   2032 		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
   2033 			return 128;
   2034 		else
   2035 			return 512;
   2036 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   2037 		if (is_ccs_plane(fb, color_plane))
   2038 			return 128;
   2039 		/* fall through */
   2040 	case I915_FORMAT_MOD_Yf_TILED:
   2041 		switch (cpp) {
   2042 		case 1:
   2043 			return 64;
   2044 		case 2:
   2045 		case 4:
   2046 			return 128;
   2047 		case 8:
   2048 		case 16:
   2049 			return 256;
   2050 		default:
   2051 			MISSING_CASE(cpp);
   2052 			return cpp;
   2053 		}
   2054 		break;
   2055 	default:
   2056 		MISSING_CASE(fb->modifier);
   2057 		return cpp;
   2058 	}
   2059 }
   2060 
   2061 static unsigned int
   2062 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
   2063 {
   2064 	if (is_gen12_ccs_plane(fb, color_plane))
   2065 		return 1;
   2066 
   2067 	return intel_tile_size(to_i915(fb->dev)) /
   2068 		intel_tile_width_bytes(fb, color_plane);
   2069 }
   2070 
   2071 /* Return the tile dimensions in pixel units */
   2072 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
   2073 			    unsigned int *tile_width,
   2074 			    unsigned int *tile_height)
   2075 {
   2076 	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
   2077 	unsigned int cpp = fb->format->cpp[color_plane];
   2078 
   2079 	*tile_width = tile_width_bytes / cpp;
   2080 	*tile_height = intel_tile_height(fb, color_plane);
   2081 }
   2082 
   2083 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
   2084 					int color_plane)
   2085 {
   2086 	unsigned int tile_width, tile_height;
   2087 
   2088 	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
   2089 
   2090 	return fb->pitches[color_plane] * tile_height;
   2091 }
   2092 
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
   2101 
   2102 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
   2103 {
   2104 	unsigned int size = 0;
   2105 	int i;
   2106 
   2107 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
   2108 		size += rot_info->plane[i].width * rot_info->plane[i].height;
   2109 
   2110 	return size;
   2111 }
   2112 
   2113 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
   2114 {
   2115 	unsigned int size = 0;
   2116 	int i;
   2117 
   2118 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
   2119 		size += rem_info->plane[i].width * rem_info->plane[i].height;
   2120 
   2121 	return size;
   2122 }
   2123 
   2124 static void
   2125 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
   2126 			const struct drm_framebuffer *fb,
   2127 			unsigned int rotation)
   2128 {
   2129 	view->type = I915_GGTT_VIEW_NORMAL;
   2130 	if (drm_rotation_90_or_270(rotation)) {
   2131 		view->type = I915_GGTT_VIEW_ROTATED;
   2132 		view->rotated = to_intel_framebuffer((struct drm_framebuffer *)__UNCONST(fb))->rot_info;
   2133 	}
   2134 }
   2135 
/* Required GGTT base alignment for the cursor plane, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
   2147 
/* Required GGTT base alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
   2160 
   2161 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
   2162 					 int color_plane)
   2163 {
   2164 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
   2165 
   2166 	/* AUX_DIST needs only 4K alignment */
   2167 	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
   2168 	    is_ccs_plane(fb, color_plane))
   2169 		return 4096;
   2170 
   2171 	switch (fb->modifier) {
   2172 	case DRM_FORMAT_MOD_LINEAR:
   2173 		return intel_linear_alignment(dev_priv);
   2174 	case I915_FORMAT_MOD_X_TILED:
   2175 		if (INTEL_GEN(dev_priv) >= 9)
   2176 			return 256 * 1024;
   2177 		return 0;
   2178 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   2179 		if (is_semiplanar_uv_plane(fb, color_plane))
   2180 			return intel_tile_row_size(fb, color_plane);
   2181 		/* Fall-through */
   2182 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   2183 		return 16 * 1024;
   2184 	case I915_FORMAT_MOD_Y_TILED_CCS:
   2185 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   2186 	case I915_FORMAT_MOD_Y_TILED:
   2187 		if (INTEL_GEN(dev_priv) >= 12 &&
   2188 		    is_semiplanar_uv_plane(fb, color_plane))
   2189 			return intel_tile_row_size(fb, color_plane);
   2190 		/* Fall-through */
   2191 	case I915_FORMAT_MOD_Yf_TILED:
   2192 		return 1 * 1024 * 1024;
   2193 	default:
   2194 		MISSING_CASE(fb->modifier);
   2195 		return 0;
   2196 	}
   2197 }
   2198 
   2199 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
   2200 {
   2201 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   2202 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   2203 
   2204 	return INTEL_GEN(dev_priv) < 4 ||
   2205 		(plane->has_fbc &&
   2206 		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
   2207 }
   2208 
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer object into the GGTT
 * for scanout.
 * @fb: framebuffer to pin
 * @view: GGTT view (normal or rotated) to pin through
 * @uses_fence: whether to try attaching a fence register
 * @out_flags: PLANE_HAS_FENCE is OR'ed in when a fence was installed
 *
 * Returns the pinned vma with an extra reference held, or an ERR_PTR on
 * failure. Release with intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment of the primary plane; 0 means "no requirement". */
	alignment = intel_surf_alignment(fb, 0);
	if (WARN_ON(alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		/* A missing fence is fatal only on pre-gen4 (see above). */
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
   2303 
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if one was
 * installed, per @flags), unpin the vma from the display plane and drop
 * the reference taken at pin time.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
   2314 
   2315 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
   2316 			  unsigned int rotation)
   2317 {
   2318 	if (drm_rotation_90_or_270(rotation))
   2319 		return to_intel_framebuffer((struct drm_framebuffer *)__UNCONST(fb))->rotated[color_plane].pitch;
   2320 	else
   2321 		return fb->pitches[color_plane];
   2322 }
   2323 
   2324 /*
   2325  * Convert the x/y offsets into a linear offset.
   2326  * Only valid with 0/180 degree rotation, which is fine since linear
   2327  * offset is only used with linear buffers on pre-hsw and tiled buffers
   2328  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
   2329  */
   2330 u32 intel_fb_xy_to_linear(int x, int y,
   2331 			  const struct intel_plane_state *state,
   2332 			  int color_plane)
   2333 {
   2334 	const struct drm_framebuffer *fb = state->hw.fb;
   2335 	unsigned int cpp = fb->format->cpp[color_plane];
   2336 	unsigned int pitch = state->color_plane[color_plane].stride;
   2337 
   2338 	return y * pitch + x * cpp;
   2339 }
   2340 
   2341 /*
   2342  * Add the x/y offsets derived from fb->offsets[] to the user
   2343  * specified plane src x/y offsets. The resulting x/y offsets
   2344  * specify the start of scanout from the beginning of the gtt mapping.
   2345  */
   2346 void intel_add_fb_offsets(int *x, int *y,
   2347 			  const struct intel_plane_state *state,
   2348 			  int color_plane)
   2349 
   2350 {
   2351 	*x += state->color_plane[color_plane].x;
   2352 	*y += state->color_plane[color_plane].y;
   2353 }
   2354 
   2355 static u32 intel_adjust_tile_offset(int *x, int *y,
   2356 				    unsigned int tile_width,
   2357 				    unsigned int tile_height,
   2358 				    unsigned int tile_size,
   2359 				    unsigned int pitch_tiles,
   2360 				    u32 old_offset,
   2361 				    u32 new_offset)
   2362 {
   2363 	unsigned int pitch_pixels = pitch_tiles * tile_width;
   2364 	unsigned int tiles;
   2365 
   2366 	WARN_ON(old_offset & (tile_size - 1));
   2367 	WARN_ON(new_offset & (tile_size - 1));
   2368 	WARN_ON(new_offset > old_offset);
   2369 
   2370 	tiles = (old_offset - new_offset) / tile_size;
   2371 
   2372 	*y += tiles / pitch_tiles * tile_height;
   2373 	*x += tiles % pitch_tiles * tile_width;
   2374 
   2375 	/* minimize x in case it got needlessly big */
   2376 	*y += *x / pitch_pixels * tile_height;
   2377 	*x %= pitch_pixels;
   2378 
   2379 	return new_offset;
   2380 }
   2381 
   2382 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
   2383 {
   2384 	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
   2385 	       is_gen12_ccs_plane(fb, color_plane);
   2386 }
   2387 
   2388 static u32 intel_adjust_aligned_offset(int *x, int *y,
   2389 				       const struct drm_framebuffer *fb,
   2390 				       int color_plane,
   2391 				       unsigned int rotation,
   2392 				       unsigned int pitch,
   2393 				       u32 old_offset, u32 new_offset)
   2394 {
   2395 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
   2396 	unsigned int cpp = fb->format->cpp[color_plane];
   2397 
   2398 	WARN_ON(new_offset > old_offset);
   2399 
   2400 	if (!is_surface_linear(fb, color_plane)) {
   2401 		unsigned int tile_size, tile_width, tile_height;
   2402 		unsigned int pitch_tiles;
   2403 
   2404 		tile_size = intel_tile_size(dev_priv);
   2405 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
   2406 
   2407 		if (drm_rotation_90_or_270(rotation)) {
   2408 			pitch_tiles = pitch / tile_height;
   2409 			swap(tile_width, tile_height);
   2410 		} else {
   2411 			pitch_tiles = pitch / (tile_width * cpp);
   2412 		}
   2413 
   2414 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
   2415 					 tile_size, pitch_tiles,
   2416 					 old_offset, new_offset);
   2417 	} else {
   2418 		old_offset += *y * pitch + *x * cpp;
   2419 
   2420 		*y = (old_offset - new_offset) / pitch;
   2421 		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
   2422 	}
   2423 
   2424 	return new_offset;
   2425 }
   2426 
   2427 /*
   2428  * Adjust the tile offset by moving the difference into
   2429  * the x/y offsets.
   2430  */
   2431 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
   2432 					     const struct intel_plane_state *state,
   2433 					     int color_plane,
   2434 					     u32 old_offset, u32 new_offset)
   2435 {
   2436 	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
   2437 					   state->hw.rotation,
   2438 					   state->color_plane[color_plane].stride,
   2439 					   old_offset, new_offset);
   2440 }
   2441 
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* In the rotated view the pitch is in tile rows, not bytes. */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles plus the remainder within a tile. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		/* Round down to the alignment; the slack goes back into x/y. */
		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			/* Keep the misaligned remainder in the x/y offsets. */
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
   2510 
   2511 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
   2512 					      const struct intel_plane_state *state,
   2513 					      int color_plane)
   2514 {
   2515 	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
   2516 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
   2517 	const struct drm_framebuffer *fb = state->hw.fb;
   2518 	unsigned int rotation = state->hw.rotation;
   2519 	int pitch = state->color_plane[color_plane].stride;
   2520 	u32 alignment;
   2521 
   2522 	if (intel_plane->id == PLANE_CURSOR)
   2523 		alignment = intel_cursor_alignment(dev_priv);
   2524 	else
   2525 		alignment = intel_surf_alignment(fb, color_plane);
   2526 
   2527 	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
   2528 					    pitch, rotation, alignment);
   2529 }
   2530 
   2531 /* Convert the fb->offset[] into x/y offsets */
   2532 static int intel_fb_offset_to_xy(int *x, int *y,
   2533 				 const struct drm_framebuffer *fb,
   2534 				 int color_plane)
   2535 {
   2536 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
   2537 	unsigned int height;
   2538 	u32 alignment;
   2539 
   2540 	if (INTEL_GEN(dev_priv) >= 12 &&
   2541 	    is_semiplanar_uv_plane(fb, color_plane))
   2542 		alignment = intel_tile_row_size(fb, color_plane);
   2543 	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
   2544 		alignment = intel_tile_size(dev_priv);
   2545 	else
   2546 		alignment = 0;
   2547 
   2548 	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
   2549 		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
   2550 			      fb->offsets[color_plane], color_plane);
   2551 		return -EINVAL;
   2552 	}
   2553 
   2554 	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
   2555 	height = ALIGN(height, intel_tile_height(fb, color_plane));
   2556 
   2557 	/* Catch potential overflows early */
   2558 	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
   2559 			    fb->offsets[color_plane])) {
   2560 		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
   2561 			      fb->offsets[color_plane], fb->pitches[color_plane],
   2562 			      color_plane);
   2563 		return -ERANGE;
   2564 	}
   2565 
   2566 	*x = 0;
   2567 	*y = 0;
   2568 
   2569 	intel_adjust_aligned_offset(x, y,
   2570 				    fb, color_plane, DRM_MODE_ROTATE_0,
   2571 				    fb->pitches[color_plane],
   2572 				    fb->offsets[color_plane], 0);
   2573 
   2574 	return 0;
   2575 }
   2576 
   2577 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
   2578 {
   2579 	switch (fb_modifier) {
   2580 	case I915_FORMAT_MOD_X_TILED:
   2581 		return I915_TILING_X;
   2582 	case I915_FORMAT_MOD_Y_TILED:
   2583 	case I915_FORMAT_MOD_Y_TILED_CCS:
   2584 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   2585 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   2586 		return I915_TILING_Y;
   2587 	default:
   2588 		return I915_TILING_NONE;
   2589 	}
   2590 }
   2591 
   2592 /*
   2593  * From the Sky Lake PRM:
   2594  * "The Color Control Surface (CCS) contains the compression status of
   2595  *  the cache-line pairs. The compression state of the cache-line pair
   2596  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
   2597  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
   2598  *  cache-line-pairs. CCS is always Y tiled."
   2599  *
   2600  * Since cache line pairs refers to horizontally adjacent cache lines,
   2601  * each cache line in the CCS corresponds to an area of 32x16 cache
   2602  * lines on the main surface. Since each pixel is 4 bytes, this gives
   2603  * us a ratio of one byte in the CCS for each 8x16 pixels in the
   2604  * main surface.
   2605  */
   2606 static const struct drm_format_info skl_ccs_formats[] = {
   2607 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
   2608 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
   2609 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
   2610 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
   2611 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
   2612 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
   2613 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
   2614 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
   2615 };
   2616 
   2617 /*
   2618  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
   2619  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
   2620  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
   2621  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
   2622  * the main surface.
   2623  */
   2624 static const struct drm_format_info gen12_ccs_formats[] = {
   2625 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
   2626 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2627 	  .hsub = 1, .vsub = 1, },
   2628 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
   2629 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2630 	  .hsub = 1, .vsub = 1, },
   2631 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
   2632 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2633 	  .hsub = 1, .vsub = 1, .has_alpha = true },
   2634 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
   2635 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2636 	  .hsub = 1, .vsub = 1, .has_alpha = true },
   2637 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
   2638 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2639 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2640 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
   2641 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2642 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2643 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
   2644 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2645 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2646 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
   2647 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2648 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2649 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
   2650 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
   2651 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2652 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
   2653 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
   2654 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2655 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
   2656 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
   2657 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2658 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
   2659 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
   2660 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2661 };
   2662 
   2663 static const struct drm_format_info *
   2664 lookup_format_info(const struct drm_format_info formats[],
   2665 		   int num_formats, u32 format)
   2666 {
   2667 	int i;
   2668 
   2669 	for (i = 0; i < num_formats; i++) {
   2670 		if (formats[i].format == format)
   2671 			return &formats[i];
   2672 	}
   2673 
   2674 	return NULL;
   2675 }
   2676 
   2677 static const struct drm_format_info *
   2678 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
   2679 {
   2680 	switch (cmd->modifier[0]) {
   2681 	case I915_FORMAT_MOD_Y_TILED_CCS:
   2682 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   2683 		return lookup_format_info(skl_ccs_formats,
   2684 					  ARRAY_SIZE(skl_ccs_formats),
   2685 					  cmd->pixel_format);
   2686 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   2687 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   2688 		return lookup_format_info(gen12_ccs_formats,
   2689 					  ARRAY_SIZE(gen12_ccs_formats),
   2690 					  cmd->pixel_format);
   2691 	default:
   2692 		return NULL;
   2693 	}
   2694 }
   2695 
   2696 bool is_ccs_modifier(u64 modifier)
   2697 {
   2698 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
   2699 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
   2700 	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
   2701 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
   2702 }
   2703 
   2704 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
   2705 {
   2706 	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
   2707 			    512) * 64;
   2708 }
   2709 
   2710 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
   2711 			      u32 pixel_format, u64 modifier)
   2712 {
   2713 	struct intel_crtc *crtc;
   2714 	struct intel_plane *plane;
   2715 
   2716 	/*
   2717 	 * We assume the primary plane for pipe A has
   2718 	 * the highest stride limits of them all.
   2719 	 */
   2720 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
   2721 	if (!crtc)
   2722 		return 0;
   2723 
   2724 	plane = to_intel_plane(crtc->base.primary);
   2725 
   2726 	return plane->max_stride(plane, pixel_format, modifier,
   2727 				 DRM_MODE_ROTATE_0);
   2728 }
   2729 
   2730 static
   2731 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
   2732 			u32 pixel_format, u64 modifier)
   2733 {
   2734 	/*
   2735 	 * Arbitrary limit for gen4+ chosen to match the
   2736 	 * render engine max stride.
   2737 	 *
   2738 	 * The new CCS hash mode makes remapping impossible
   2739 	 */
   2740 	if (!is_ccs_modifier(modifier)) {
   2741 		if (INTEL_GEN(dev_priv) >= 7)
   2742 			return 256*1024;
   2743 		else if (INTEL_GEN(dev_priv) >= 4)
   2744 			return 128*1024;
   2745 	}
   2746 
   2747 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
   2748 }
   2749 
   2750 static u32
   2751 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
   2752 {
   2753 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
   2754 	u32 tile_width;
   2755 
   2756 	if (is_surface_linear(fb, color_plane)) {
   2757 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
   2758 							   fb->format->format,
   2759 							   fb->modifier);
   2760 
   2761 		/*
   2762 		 * To make remapping with linear generally feasible
   2763 		 * we need the stride to be page aligned.
   2764 		 */
   2765 		if (fb->pitches[color_plane] > max_stride &&
   2766 		    !is_ccs_modifier(fb->modifier))
   2767 			return intel_tile_size(dev_priv);
   2768 		else
   2769 			return 64;
   2770 	}
   2771 
   2772 	tile_width = intel_tile_width_bytes(fb, color_plane);
   2773 	if (is_ccs_modifier(fb->modifier)) {
   2774 		/*
   2775 		 * Display WA #0531: skl,bxt,kbl,glk
   2776 		 *
   2777 		 * Render decompression and plane width > 3840
   2778 		 * combined with horizontal panning requires the
   2779 		 * plane stride to be a multiple of 4. We'll just
   2780 		 * require the entire fb to accommodate that to avoid
   2781 		 * potential runtime errors at plane configuration time.
   2782 		 */
   2783 		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
   2784 			tile_width *= 4;
   2785 		/*
   2786 		 * The main surface pitch must be padded to a multiple of four
   2787 		 * tile widths.
   2788 		 */
   2789 		else if (INTEL_GEN(dev_priv) >= 12)
   2790 			tile_width *= 4;
   2791 	}
   2792 	return tile_width;
   2793 }
   2794 
   2795 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
   2796 {
   2797 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   2798 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   2799 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   2800 	int i;
   2801 
   2802 	/* We don't want to deal with remapping with cursors */
   2803 	if (plane->id == PLANE_CURSOR)
   2804 		return false;
   2805 
   2806 	/*
   2807 	 * The display engine limits already match/exceed the
   2808 	 * render engine limits, so not much point in remapping.
   2809 	 * Would also need to deal with the fence POT alignment
   2810 	 * and gen2 2KiB GTT tile size.
   2811 	 */
   2812 	if (INTEL_GEN(dev_priv) < 4)
   2813 		return false;
   2814 
   2815 	/*
   2816 	 * The new CCS hash mode isn't compatible with remapping as
   2817 	 * the virtual address of the pages affects the compressed data.
   2818 	 */
   2819 	if (is_ccs_modifier(fb->modifier))
   2820 		return false;
   2821 
   2822 	/* Linear needs a page aligned stride for remapping */
   2823 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
   2824 		unsigned int alignment = intel_tile_size(dev_priv) - 1;
   2825 
   2826 		for (i = 0; i < fb->format->num_planes; i++) {
   2827 			if (fb->pitches[i] & alignment)
   2828 				return false;
   2829 		}
   2830 	}
   2831 
   2832 	return true;
   2833 }
   2834 
   2835 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
   2836 {
   2837 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   2838 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   2839 	unsigned int rotation = plane_state->hw.rotation;
   2840 	u32 stride, max_stride;
   2841 
   2842 	/*
   2843 	 * No remapping for invisible planes since we don't have
   2844 	 * an actual source viewport to remap.
   2845 	 */
   2846 	if (!plane_state->uapi.visible)
   2847 		return false;
   2848 
   2849 	if (!intel_plane_can_remap(plane_state))
   2850 		return false;
   2851 
   2852 	/*
   2853 	 * FIXME: aux plane limits on gen9+ are
   2854 	 * unclear in Bspec, for now no checking.
   2855 	 */
   2856 	stride = intel_fb_pitch(fb, 0, rotation);
   2857 	max_stride = plane->max_stride(plane, fb->format->format,
   2858 				       fb->modifier, rotation);
   2859 
   2860 	return stride > max_stride;
   2861 }
   2862 
/*
 * Return in *hsub/*vsub the horizontal/vertical subsampling factors of
 * @color_plane relative to the framebuffer's plane 0 resolution.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	/* Plane 0 is by definition unsubsampled. */
	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	/*
	 * Gen-12 CCS plane: derive hsub from the ratio of the CCS and
	 * main plane block widths.
	 */
	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* Vertical subsampling of a gen-12 CCS plane is a fixed 32. */
	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offset of a CCS plane matches that
 * of its main surface plane; since the CCS plane has no x/y offset
 * register of its own, a mismatch can't be programmed.  Returns 0 on
 * success, -EINVAL on a mismatch, and 0 for non-CCS planes.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* Nothing to check for non-CCS planes. */
	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dimensions to main surface units. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
   2947 
   2948 static void
   2949 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
   2950 {
   2951 	int main_plane = is_ccs_plane(fb, color_plane) ?
   2952 			 ccs_to_main_plane(fb, color_plane) : 0;
   2953 	int main_hsub, main_vsub;
   2954 	int hsub, vsub;
   2955 
   2956 	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
   2957 	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
   2958 	*w = fb->width / main_hsub / hsub;
   2959 	*h = fb->height / main_vsub / vsub;
   2960 }
   2961 
   2962 /*
   2963  * Setup the rotated view for an FB plane and return the size the GTT mapping
   2964  * requires for this view.
   2965  */
   2966 static u32
   2967 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
   2968 		  u32 gtt_offset_rotated, int x, int y,
   2969 		  unsigned int width, unsigned int height,
   2970 		  unsigned int tile_size,
   2971 		  unsigned int tile_width, unsigned int tile_height,
   2972 		  struct drm_framebuffer *fb)
   2973 {
   2974 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
   2975 	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
   2976 	unsigned int pitch_tiles;
   2977 	struct drm_rect r;
   2978 
   2979 	/* Y or Yf modifiers required for 90/270 rotation */
   2980 	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
   2981 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
   2982 		return 0;
   2983 
   2984 	if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
   2985 		return 0;
   2986 
   2987 	rot_info->plane[plane] = *plane_info;
   2988 
   2989 	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
   2990 
   2991 	/* rotate the x/y offsets to match the GTT view */
   2992 	drm_rect_init(&r, x, y, width, height);
   2993 	drm_rect_rotate(&r,
   2994 			plane_info->width * tile_width,
   2995 			plane_info->height * tile_height,
   2996 			DRM_MODE_ROTATE_270);
   2997 	x = r.x1;
   2998 	y = r.y1;
   2999 
   3000 	/* rotate the tile dimensions to match the GTT view */
   3001 	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
   3002 	swap(tile_width, tile_height);
   3003 
   3004 	/*
   3005 	 * We only keep the x/y offsets, so push all of the
   3006 	 * gtt offset into the x/y offsets.
   3007 	 */
   3008 	intel_adjust_tile_offset(&x, &y,
   3009 				 tile_width, tile_height,
   3010 				 tile_size, pitch_tiles,
   3011 				 gtt_offset_rotated * tile_size, 0);
   3012 
   3013 	/*
   3014 	 * First pixel of the framebuffer from
   3015 	 * the start of the rotated gtt mapping.
   3016 	 */
   3017 	intel_fb->rotated[plane].x = x;
   3018 	intel_fb->rotated[plane].y = y;
   3019 
   3020 	return plane_info->width * plane_info->height;
   3021 }
   3022 
/*
 * Compute the per-color-plane normal-view x/y offsets of @fb, set up the
 * rotated view where applicable, and verify that the resulting layout
 * fits inside the backing GEM object.  Returns 0 on success or a
 * negative error code.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;	/* in tiles */
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the plane's byte offset to an x/y position. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* CCS planes must line up with their main surface. */
		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;	/* bytes -> tiles */

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear surfaces: size in tiles of the byte span. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %"PRIu64" bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
   3130 
/*
 * Build a remapped (or rotated) GTT view for the plane and recompute the
 * per-color-plane stride and x/y offsets relative to that view.  The
 * source rect in plane_state->uapi.src is translated to be viewport
 * relative (and rotated for 90/270).  Callers must have checked
 * intel_plane_can_remap() first (CCS modifiers are not supported here,
 * see the WARN_ON below).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	/* note: info points into view, so zero it before setting type */
	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;	/* bytes -> tiles */

		WARN_ON(i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* Planes are packed back-to-back in the remapped view. */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
   3241 
/*
 * Pick the GGTT view for the plane (normal, rotated, or remapped) and
 * fill in the per-color-plane stride and x/y offsets accordingly.
 * Returns 0 on success or the error from intel_plane_check_stride().
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No fb: nothing to map. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Use the precomputed normal/rotated offsets from the fb. */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
   3290 
   3291 static int i9xx_format_to_fourcc(int format)
   3292 {
   3293 	switch (format) {
   3294 	case DISPPLANE_8BPP:
   3295 		return DRM_FORMAT_C8;
   3296 	case DISPPLANE_BGRA555:
   3297 		return DRM_FORMAT_ARGB1555;
   3298 	case DISPPLANE_BGRX555:
   3299 		return DRM_FORMAT_XRGB1555;
   3300 	case DISPPLANE_BGRX565:
   3301 		return DRM_FORMAT_RGB565;
   3302 	default:
   3303 	case DISPPLANE_BGRX888:
   3304 		return DRM_FORMAT_XRGB8888;
   3305 	case DISPPLANE_RGBX888:
   3306 		return DRM_FORMAT_XBGR8888;
   3307 	case DISPPLANE_BGRA888:
   3308 		return DRM_FORMAT_ARGB8888;
   3309 	case DISPPLANE_RGBA888:
   3310 		return DRM_FORMAT_ABGR8888;
   3311 	case DISPPLANE_BGRX101010:
   3312 		return DRM_FORMAT_XRGB2101010;
   3313 	case DISPPLANE_RGBX101010:
   3314 		return DRM_FORMAT_XBGR2101010;
   3315 	case DISPPLANE_BGRA101010:
   3316 		return DRM_FORMAT_ARGB2101010;
   3317 	case DISPPLANE_RGBA101010:
   3318 		return DRM_FORMAT_ABGR2101010;
   3319 	case DISPPLANE_RGBX161616:
   3320 		return DRM_FORMAT_XBGR16161616F;
   3321 	}
   3322 }
   3323 
   3324 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
   3325 {
   3326 	switch (format) {
   3327 	case PLANE_CTL_FORMAT_RGB_565:
   3328 		return DRM_FORMAT_RGB565;
   3329 	case PLANE_CTL_FORMAT_NV12:
   3330 		return DRM_FORMAT_NV12;
   3331 	case PLANE_CTL_FORMAT_P010:
   3332 		return DRM_FORMAT_P010;
   3333 	case PLANE_CTL_FORMAT_P012:
   3334 		return DRM_FORMAT_P012;
   3335 	case PLANE_CTL_FORMAT_P016:
   3336 		return DRM_FORMAT_P016;
   3337 	case PLANE_CTL_FORMAT_Y210:
   3338 		return DRM_FORMAT_Y210;
   3339 	case PLANE_CTL_FORMAT_Y212:
   3340 		return DRM_FORMAT_Y212;
   3341 	case PLANE_CTL_FORMAT_Y216:
   3342 		return DRM_FORMAT_Y216;
   3343 	case PLANE_CTL_FORMAT_Y410:
   3344 		return DRM_FORMAT_XVYU2101010;
   3345 	case PLANE_CTL_FORMAT_Y412:
   3346 		return DRM_FORMAT_XVYU12_16161616;
   3347 	case PLANE_CTL_FORMAT_Y416:
   3348 		return DRM_FORMAT_XVYU16161616;
   3349 	default:
   3350 	case PLANE_CTL_FORMAT_XRGB_8888:
   3351 		if (rgb_order) {
   3352 			if (alpha)
   3353 				return DRM_FORMAT_ABGR8888;
   3354 			else
   3355 				return DRM_FORMAT_XBGR8888;
   3356 		} else {
   3357 			if (alpha)
   3358 				return DRM_FORMAT_ARGB8888;
   3359 			else
   3360 				return DRM_FORMAT_XRGB8888;
   3361 		}
   3362 	case PLANE_CTL_FORMAT_XRGB_2101010:
   3363 		if (rgb_order) {
   3364 			if (alpha)
   3365 				return DRM_FORMAT_ABGR2101010;
   3366 			else
   3367 				return DRM_FORMAT_XBGR2101010;
   3368 		} else {
   3369 			if (alpha)
   3370 				return DRM_FORMAT_ARGB2101010;
   3371 			else
   3372 				return DRM_FORMAT_XRGB2101010;
   3373 		}
   3374 	case PLANE_CTL_FORMAT_XRGB_16161616F:
   3375 		if (rgb_order) {
   3376 			if (alpha)
   3377 				return DRM_FORMAT_ABGR16161616F;
   3378 			else
   3379 				return DRM_FORMAT_XBGR16161616F;
   3380 		} else {
   3381 			if (alpha)
   3382 				return DRM_FORMAT_ARGB16161616F;
   3383 			else
   3384 				return DRM_FORMAT_XRGB16161616F;
   3385 		}
   3386 	}
   3387 }
   3388 
/*
 * Try to wrap the firmware-preallocated (stolen memory) scanout buffer
 * in a GEM object and initialize @plane_config->fb with it.  Returns
 * true on success; false when the preallocated buffer can't be reused
 * (zero size, too big, unsupported modifier/tiling, or init failure).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%"PRIx64"\n",
				 fb->modifier);
		return false;
	}

	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (IS_ERR(obj))
		return false;

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	/* Re-describe the BIOS fb for intel_framebuffer_init(). */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/*
	 * NOTE(review): our reference is dropped on both paths;
	 * presumably intel_framebuffer_init() takes its own reference
	 * on success — confirm against its implementation.
	 */
	i915_gem_object_put(obj);
	return ret;
}
   3463 
   3464 static void
   3465 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
   3466 			struct intel_plane_state *plane_state,
   3467 			bool visible)
   3468 {
   3469 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   3470 
   3471 	plane_state->uapi.visible = visible;
   3472 
   3473 	if (visible)
   3474 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
   3475 	else
   3476 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
   3477 }
   3478 
   3479 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
   3480 {
   3481 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   3482 	struct drm_plane *plane;
   3483 
   3484 	/*
   3485 	 * Active_planes aliases if multiple "primary" or cursor planes
   3486 	 * have been used on the same (or wrong) pipe. plane_mask uses
   3487 	 * unique ids, hence we can use that to reconstruct active_planes.
   3488 	 */
   3489 	crtc_state->active_planes = 0;
   3490 
   3491 	drm_for_each_plane_mask(plane, &dev_priv->drm,
   3492 				crtc_state->uapi.plane_mask)
   3493 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
   3494 }
   3495 
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * updating the cached crtc/plane software state to match.  Used when
 * sanitizing state inherited from the BIOS.  The cxsr/underrun dance
 * below is order-sensitive; do not reorder the steps.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	/* Update software state first so active_planes etc. are coherent. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is only used together with the primary plane. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
   3539 
   3540 static struct intel_frontbuffer *
   3541 to_intel_frontbuffer(struct drm_framebuffer *fb)
   3542 {
   3543 	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
   3544 }
   3545 
/*
 * Take over the framebuffer the BIOS left enabled on @intel_crtc's
 * primary plane.  First try to wrap the preallocated (stolen) memory in
 * a GEM object; failing that, try to share an fb already reconstructed
 * for another active CRTC scanning out from the same GGTT address.  If
 * neither works, disable the primary plane so software state does not
 * claim a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	/* Nothing to do if plane_config reconstruction found no fb. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => same BIOS fb; share it. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	/* Populate the plane state as if a normal modeset had happened. */
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		/* Drop our reference; the plane keeps scanning out until
		 * a later modeset replaces it. */
		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Source rectangle is 16.16 fixed point covering the whole fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	/* Keep the BIOS-programmed swizzle if the fb is tiled. */
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
   3651 
   3652 static int skl_max_plane_width(const struct drm_framebuffer *fb,
   3653 			       int color_plane,
   3654 			       unsigned int rotation)
   3655 {
   3656 	int cpp = fb->format->cpp[color_plane];
   3657 
   3658 	switch (fb->modifier) {
   3659 	case DRM_FORMAT_MOD_LINEAR:
   3660 	case I915_FORMAT_MOD_X_TILED:
   3661 		/*
   3662 		 * Validated limit is 4k, but has 5k should
   3663 		 * work apart from the following features:
   3664 		 * - Ytile (already limited to 4k)
   3665 		 * - FP16 (already limited to 4k)
   3666 		 * - render compression (already limited to 4k)
   3667 		 * - KVMR sprite and cursor (don't care)
   3668 		 * - horizontal panning (TODO verify this)
   3669 		 * - pipe and plane scaling (TODO verify this)
   3670 		 */
   3671 		if (cpp == 8)
   3672 			return 4096;
   3673 		else
   3674 			return 5120;
   3675 	case I915_FORMAT_MOD_Y_TILED_CCS:
   3676 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   3677 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   3678 		/* FIXME AUX plane? */
   3679 	case I915_FORMAT_MOD_Y_TILED:
   3680 	case I915_FORMAT_MOD_Yf_TILED:
   3681 		if (cpp == 8)
   3682 			return 2048;
   3683 		else
   3684 			return 4096;
   3685 	default:
   3686 		MISSING_CASE(fb->modifier);
   3687 		return 2048;
   3688 	}
   3689 }
   3690 
   3691 static int glk_max_plane_width(const struct drm_framebuffer *fb,
   3692 			       int color_plane,
   3693 			       unsigned int rotation)
   3694 {
   3695 	int cpp = fb->format->cpp[color_plane];
   3696 
   3697 	switch (fb->modifier) {
   3698 	case DRM_FORMAT_MOD_LINEAR:
   3699 	case I915_FORMAT_MOD_X_TILED:
   3700 		if (cpp == 8)
   3701 			return 4096;
   3702 		else
   3703 			return 5120;
   3704 	case I915_FORMAT_MOD_Y_TILED_CCS:
   3705 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   3706 		/* FIXME AUX plane? */
   3707 	case I915_FORMAT_MOD_Y_TILED:
   3708 	case I915_FORMAT_MOD_Yf_TILED:
   3709 		if (cpp == 8)
   3710 			return 2048;
   3711 		else
   3712 			return 5120;
   3713 	default:
   3714 		MISSING_CASE(fb->modifier);
   3715 		return 2048;
   3716 	}
   3717 }
   3718 
/*
 * Maximum plane source width on ICL+: a flat 5k limit regardless of
 * modifier, format or rotation (parameters kept for interface symmetry
 * with the skl/glk variants).
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
   3725 
/* Maximum plane source height (in lines) on pre-ICL gen9 hardware. */
static int skl_max_plane_height(void)
{
	return 4096;
}
   3730 
/* Maximum plane source height (in lines) on ICL+ hardware. */
static int icl_max_plane_height(void)
{
	return 4320;
}
   3735 
/*
 * Try to make the CCS AUX plane's x/y coincide with the main plane's
 * (@main_x, @main_y) by walking the AUX surface offset backwards in
 * @alignment steps.  Returns true (and updates the AUX color_plane
 * offset/x/y) when matching coordinates were found, false otherwise.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/*
	 * Step the AUX offset down by one alignment unit at a time,
	 * recomputing x/y each step, until the (subsampled) coordinates
	 * line up with the main surface or we run out of room.
	 */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* Convert back to full-resolution coordinates, keeping
		 * the sub-sample phase of the original aux_x/aux_y. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
   3780 
/*
 * Validate and finalize the main (color plane 0) surface layout for a
 * gen9+ plane: enforce per-platform width/height limits, compute an
 * aligned surface offset, keep X-tile x+width within the stride, and
 * line the offset up with the CCS AUX surface when one is present.
 * Returns 0 on success, -EINVAL when no workable layout exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point; take the integer part. */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	/*
	 * NOTE(review): when the fb has no AUX plane, aux_plane is
	 * presumably 0 and aux_offset reads plane 0's own offset --
	 * confirm the "offset > aux_offset" clamp below stays benign
	 * in that case.
	 */
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	if (WARN_ON(alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Pull the offset back until the row fits in the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
   3884 
/*
 * Validate and finalize the chroma (UV, color plane 1) surface of a
 * YUV semi-planar fb, aligning it with its CCS plane when present.
 * Returns 0 on success, -EINVAL if the chroma source is too large or
 * no CCS-compatible offset can be found.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	/*
	 * src is 16.16 fixed point; >> 17 is the integer part halved,
	 * i.e. the coordinates in the 2x2-subsampled chroma plane.
	 */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/* CCS offset is relative and must stay non-negative. */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/* Walk the offset back until UV and CCS x/y coincide. */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
   3945 
/*
 * Compute aligned offsets and x/y coordinates for every CCS (color
 * control surface) plane of the framebuffer, derived from the main
 * plane's integer source position.  Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* Integer part of the 16.16 fixed-point source origin. */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* Only CCS planes are handled here; skip color planes. */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* Total subsampling = main plane's times the CCS plane's. */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/* Store x/y in main-plane-relative units, preserving the
		 * sub-sample phase of the original src_x/src_y. */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
   3988 
/*
 * Top-level gen9+ plane surface check: compute the GTT mapping, then
 * validate/finalize the AUX surfaces (CCS and/or UV) before the main
 * surface, since the main surface offset depends on them.  Returns 0
 * on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool needs_aux = false;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Invisible planes need no surface layout work. */
	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		needs_aux = true;
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		needs_aux = true;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (!needs_aux) {
		int i;

		/*
		 * Poison the unused extra planes so any accidental use
		 * is noticeable (~0xfff is presumably an intentionally
		 * invalid offset -- NOTE(review): confirm).
		 */
		for (i = 1; i < fb->format->num_planes; i++) {
			plane_state->color_plane[i].offset = ~0xfff;
			plane_state->color_plane[i].x = 0;
			plane_state->color_plane[i].y = 0;
		}
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
   4037 
   4038 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
   4039 			     const struct intel_plane_state *plane_state,
   4040 			     unsigned int *num, unsigned int *den)
   4041 {
   4042 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4043 	unsigned int cpp = fb->format->cpp[0];
   4044 
   4045 	/*
   4046 	 * g4x bspec says 64bpp pixel rate can't exceed 80%
   4047 	 * of cdclk when the sprite plane is enabled on the
   4048 	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
   4049 	 * never allowed to exceed 80% of cdclk. Let's just go
   4050 	 * with the ilk/snb limit always.
   4051 	 */
   4052 	if (cpp == 8) {
   4053 		*num = 10;
   4054 		*den = 8;
   4055 	} else {
   4056 		*num = 1;
   4057 		*den = 1;
   4058 	}
   4059 }
   4060 
   4061 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
   4062 				const struct intel_plane_state *plane_state)
   4063 {
   4064 	unsigned int pixel_rate;
   4065 	unsigned int num, den;
   4066 
   4067 	/*
   4068 	 * Note that crtc_state->pixel_rate accounts for both
   4069 	 * horizontal and vertical panel fitter downscaling factors.
   4070 	 * Pre-HSW bspec tells us to only consider the horizontal
   4071 	 * downscaling factor here. We ignore that and just consider
   4072 	 * both for simplicity.
   4073 	 */
   4074 	pixel_rate = crtc_state->pixel_rate;
   4075 
   4076 	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
   4077 
   4078 	/* two pixels per clock with double wide pipe */
   4079 	if (crtc_state->double_wide)
   4080 		den *= 2;
   4081 
   4082 	return DIV_ROUND_UP(pixel_rate * num, den);
   4083 }
   4084 
   4085 unsigned int
   4086 i9xx_plane_max_stride(struct intel_plane *plane,
   4087 		      u32 pixel_format, u64 modifier,
   4088 		      unsigned int rotation)
   4089 {
   4090 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   4091 
   4092 	if (!HAS_GMCH(dev_priv)) {
   4093 		return 32*1024;
   4094 	} else if (INTEL_GEN(dev_priv) >= 4) {
   4095 		if (modifier == I915_FORMAT_MOD_X_TILED)
   4096 			return 16*1024;
   4097 		else
   4098 			return 32*1024;
   4099 	} else if (INTEL_GEN(dev_priv) >= 3) {
   4100 		if (modifier == I915_FORMAT_MOD_X_TILED)
   4101 			return 8*1024;
   4102 		else
   4103 			return 16*1024;
   4104 	} else {
   4105 		if (plane->i9xx_plane == PLANE_C)
   4106 			return 4*1024;
   4107 		else
   4108 			return 8*1024;
   4109 	}
   4110 }
   4111 
   4112 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
   4113 {
   4114 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4115 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4116 	u32 dspcntr = 0;
   4117 
   4118 	if (crtc_state->gamma_enable)
   4119 		dspcntr |= DISPPLANE_GAMMA_ENABLE;
   4120 
   4121 	if (crtc_state->csc_enable)
   4122 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
   4123 
   4124 	if (INTEL_GEN(dev_priv) < 5)
   4125 		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
   4126 
   4127 	return dspcntr;
   4128 }
   4129 
/*
 * Compute the plane-dependent bits of the DSPCNTR register value:
 * pixel format, tiling, rotation/reflection and trickle feed.
 * Returns 0 (plane disabled) for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Translate the DRM fourcc into the hardware pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling bit only exists on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
   4202 
   4203 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
   4204 {
   4205 	struct drm_i915_private *dev_priv =
   4206 		to_i915(plane_state->uapi.plane->dev);
   4207 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4208 	int src_x, src_y, src_w;
   4209 	u32 offset;
   4210 	int ret;
   4211 
   4212 	ret = intel_plane_compute_gtt(plane_state);
   4213 	if (ret)
   4214 		return ret;
   4215 
   4216 	if (!plane_state->uapi.visible)
   4217 		return 0;
   4218 
   4219 	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
   4220 	src_x = plane_state->uapi.src.x1 >> 16;
   4221 	src_y = plane_state->uapi.src.y1 >> 16;
   4222 
   4223 	/* Undocumented hardware limit on i965/g4x/vlv/chv */
   4224 	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
   4225 		return -EINVAL;
   4226 
   4227 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
   4228 
   4229 	if (INTEL_GEN(dev_priv) >= 4)
   4230 		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
   4231 							    plane_state, 0);
   4232 	else
   4233 		offset = 0;
   4234 
   4235 	/*
   4236 	 * Put the final coordinates back so that the src
   4237 	 * coordinate checks will see the right values.
   4238 	 */
   4239 	drm_rect_translate_to(&plane_state->uapi.src,
   4240 			      src_x << 16, src_y << 16);
   4241 
   4242 	/* HSW/BDW do this automagically in hardware */
   4243 	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
   4244 		unsigned int rotation = plane_state->hw.rotation;
   4245 		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
   4246 		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
   4247 
   4248 		if (rotation & DRM_MODE_ROTATE_180) {
   4249 			src_x += src_w - 1;
   4250 			src_y += src_h - 1;
   4251 		} else if (rotation & DRM_MODE_REFLECT_X) {
   4252 			src_x += src_w - 1;
   4253 		}
   4254 	}
   4255 
   4256 	plane_state->color_plane[0].offset = offset;
   4257 	plane_state->color_plane[0].x = src_x;
   4258 	plane_state->color_plane[0].y = src_y;
   4259 
   4260 	return 0;
   4261 }
   4262 
   4263 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
   4264 {
   4265 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   4266 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
   4267 
   4268 	if (IS_CHERRYVIEW(dev_priv))
   4269 		return i9xx_plane == PLANE_B;
   4270 	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
   4271 		return false;
   4272 	else if (IS_GEN(dev_priv, 4))
   4273 		return i9xx_plane == PLANE_C;
   4274 	else
   4275 		return i9xx_plane == PLANE_B ||
   4276 			i9xx_plane == PLANE_C;
   4277 }
   4278 
/*
 * Atomic .check_plane hook for pre-skl primary planes: validate
 * rotation, clip the plane state (no scaling supported), finalize the
 * surface layout, and precompute the DSPCNTR value.  Returns 0 on
 * success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* No scaling; windowing only where the hardware supports it. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* Fully clipped planes need no further programming. */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
   4314 
/*
 * Program and arm a pre-skl primary plane from precomputed plane/crtc
 * state.  All register writes happen under the uncore lock; the write
 * order matters (see the comment before the DSPCNTR write).
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine precomputed plane bits with the crtc-dependent bits. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no DSPSURF; the base address carries the offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   4385 
/*
 * Disable a pre-SKL primary plane: program a minimal DSPCNTR (pipe
 * gamma/CSC bits only, plane enable bit clear) and zero the surface
 * start address register.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	/* uncore.lock serializes the unlocked _FW register accesses below */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Write DSPCNTR first; the surface/address write arms the update. */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   4416 
/*
 * Read back the hardware state of a pre-SKL primary plane.
 *
 * Returns true if the plane is enabled in DSPCNTR; on success *pipe is
 * set to the pipe the plane is currently assigned to (from the DSPCNTR
 * pipe-select bits on gen2-4, from the fixed plane->pipe mapping on
 * gen5+). Returns false without touching *pipe if the pipe's power
 * domain is not energized.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* gen5+ dropped the pipe-select bits; plane<->pipe mapping is fixed */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   4451 
   4452 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
   4453 {
   4454 	struct drm_device *dev = intel_crtc->base.dev;
   4455 	struct drm_i915_private *dev_priv = to_i915(dev);
   4456 
   4457 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
   4458 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
   4459 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
   4460 }
   4461 
   4462 /*
   4463  * This function detaches (aka. unbinds) unused scalers in hardware
   4464  */
   4465 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
   4466 {
   4467 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4468 	const struct intel_crtc_scaler_state *scaler_state =
   4469 		&crtc_state->scaler_state;
   4470 	int i;
   4471 
   4472 	/* loop through and disable scalers that aren't in use */
   4473 	for (i = 0; i < intel_crtc->num_scalers; i++) {
   4474 		if (!scaler_state->scalers[i].in_use)
   4475 			skl_detach_scaler(intel_crtc, i);
   4476 	}
   4477 }
   4478 
/*
 * Return the divisor used to convert a byte stride into the unit the
 * PLANE_STRIDE register expects for this surface.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * Linear buffers express the stride in 64 byte chunks; tiled
	 * buffers express it in tiles, measured along the scanout
	 * direction (tile height when rotated 90/270, tile width
	 * in bytes otherwise).
	 */
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
   4493 
   4494 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
   4495 		     int color_plane)
   4496 {
   4497 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4498 	unsigned int rotation = plane_state->hw.rotation;
   4499 	u32 stride = plane_state->color_plane[color_plane].stride;
   4500 
   4501 	if (color_plane >= fb->format->num_planes)
   4502 		return 0;
   4503 
   4504 	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
   4505 }
   4506 
/*
 * Translate a DRM fourcc pixel format into the matching SKL+ PLANE_CTL
 * source format (and RGB/BGR channel order) bits. Unhandled formats
 * trigger MISSING_CASE() and fall back to 0.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
   4566 
   4567 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
   4568 {
   4569 	if (!plane_state->hw.fb->format->has_alpha)
   4570 		return PLANE_CTL_ALPHA_DISABLE;
   4571 
   4572 	switch (plane_state->hw.pixel_blend_mode) {
   4573 	case DRM_MODE_BLEND_PIXEL_NONE:
   4574 		return PLANE_CTL_ALPHA_DISABLE;
   4575 	case DRM_MODE_BLEND_PREMULTI:
   4576 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
   4577 	case DRM_MODE_BLEND_COVERAGE:
   4578 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
   4579 	default:
   4580 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
   4581 		return PLANE_CTL_ALPHA_DISABLE;
   4582 	}
   4583 }
   4584 
   4585 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
   4586 {
   4587 	if (!plane_state->hw.fb->format->has_alpha)
   4588 		return PLANE_COLOR_ALPHA_DISABLE;
   4589 
   4590 	switch (plane_state->hw.pixel_blend_mode) {
   4591 	case DRM_MODE_BLEND_PIXEL_NONE:
   4592 		return PLANE_COLOR_ALPHA_DISABLE;
   4593 	case DRM_MODE_BLEND_PREMULTI:
   4594 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
   4595 	case DRM_MODE_BLEND_COVERAGE:
   4596 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
   4597 	default:
   4598 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
   4599 		return PLANE_COLOR_ALPHA_DISABLE;
   4600 	}
   4601 }
   4602 
/*
 * Translate a framebuffer tiling modifier into the SKL+ PLANE_CTL
 * tiling (and, for CCS modifiers, decompression) bits. Linear
 * surfaces need no bits set; unknown modifiers trigger MISSING_CASE()
 * and fall back to 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
   4630 
/*
 * Translate a DRM rotation property value into the SKL+ PLANE_CTL
 * rotation bits. Note the deliberate 90<->270 swap below.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
   4652 
   4653 static u32 cnl_plane_ctl_flip(unsigned int reflect)
   4654 {
   4655 	switch (reflect) {
   4656 	case 0:
   4657 		break;
   4658 	case DRM_MODE_REFLECT_X:
   4659 		return PLANE_CTL_FLIP_HORIZONTAL;
   4660 	case DRM_MODE_REFLECT_Y:
   4661 	default:
   4662 		MISSING_CASE(reflect);
   4663 	}
   4664 
   4665 	return 0;
   4666 }
   4667 
   4668 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
   4669 {
   4670 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4671 	u32 plane_ctl = 0;
   4672 
   4673 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
   4674 		return plane_ctl;
   4675 
   4676 	if (crtc_state->gamma_enable)
   4677 		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
   4678 
   4679 	if (crtc_state->csc_enable)
   4680 		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
   4681 
   4682 	return plane_ctl;
   4683 }
   4684 
/*
 * Compute the plane-derived PLANE_CTL value for a SKL+ universal
 * plane: enable bit, source format, tiling, rotation/flip, colorkey
 * mode and (pre-GLK only) alpha blending and YCbCr CSC setup.
 * Combine with skl_plane_ctl_crtc() for the full register value.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* On GLK/gen10+ the alpha and CSC controls moved to PLANE_COLOR_CTL */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip only exists on gen10+ (CNL and later) */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
   4723 
   4724 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
   4725 {
   4726 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4727 	u32 plane_color_ctl = 0;
   4728 
   4729 	if (INTEL_GEN(dev_priv) >= 11)
   4730 		return plane_color_ctl;
   4731 
   4732 	if (crtc_state->gamma_enable)
   4733 		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
   4734 
   4735 	if (crtc_state->csc_enable)
   4736 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
   4737 
   4738 	return plane_color_ctl;
   4739 }
   4740 
/*
 * Compute the plane-derived PLANE_COLOR_CTL value for GLK+ planes:
 * plane gamma disable, alpha blend mode, and YCbCr->RGB CSC selection.
 * Combine with glk_plane_color_ctl_crtc() for the full register value.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	/*
	 * Non-HDR planes use the fixed-function YUV->RGB CSC modes;
	 * HDR-capable planes use the programmable input CSC instead.
	 */
	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
   4767 
/*
 * Re-initialize display hardware state and (optionally) re-commit a
 * previously duplicated atomic state, e.g. after a GPU reset or
 * system resume.
 *
 * @state may be NULL, in which case only the hw state readout/setup
 * is performed. Returns 0 on success or a negative error code from
 * the atomic commit; -EDEADLK is not expected (the caller holds the
 * full modeset locks via @ctx) and triggers a WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
   4806 
   4807 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
   4808 {
   4809 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
   4810 		intel_has_gpu_reset(&dev_priv->gt));
   4811 }
   4812 
/*
 * Quiesce the display before a GPU reset that will clobber it:
 * wake any sleeping atomic commit waiters, take all modeset locks,
 * duplicate the current display state for later restore, and disable
 * all crtcs. The saved state and held locks are consumed/released by
 * intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	/* NetBSD: wake commit waiters under the spin lock they sleep on */
	spin_lock(&dev_priv->atomic_commit_lock);
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	DRM_SPIN_WAKEUP_ALL(&dev_priv->atomic_commit_wq,
	    &dev_priv->atomic_commit_lock);
	spin_unlock(&dev_priv->atomic_commit_lock);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until the deadlock backoff succeeds */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		/* locks stay held; intel_finish_reset() drops them */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() to restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
   4871 
/*
 * Counterpart to intel_prepare_reset(): after the GPU reset, restore
 * the display state duplicated earlier (re-initializing display HW
 * first if the reset clobbered it), then drop the modeset locks and
 * clear the I915_RESET_MODESET flag.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* hotplug interrupt setup must run under irq_lock */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* release the locks taken in intel_prepare_reset() */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
   4922 
   4923 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
   4924 {
   4925 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4926 	enum pipe pipe = crtc->pipe;
   4927 	u32 tmp;
   4928 
   4929 	tmp = I915_READ(PIPE_CHICKEN(pipe));
   4930 
   4931 	/*
   4932 	 * Display WA #1153: icl
   4933 	 * enable hardware to bypass the alpha math
   4934 	 * and rounding for per-pixel values 00 and 0xff
   4935 	 */
   4936 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
   4937 	/*
   4938 	 * Display WA # 1605353570: icl
   4939 	 * Set the pixel rounding bit to 1 for allowing
   4940 	 * passthrough of Frame buffer pixels unmodified
   4941 	 * across pipe
   4942 	 */
   4943 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
   4944 	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
   4945 }
   4946 
/*
 * Enable Transcoder Port Sync on a slave transcoder, pointing it at
 * its master. No-op for crtcs that are not port-sync slaves
 * (master_transcoder == INVALID_TRANSCODER).
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* HW encoding: 0 = EDP transcoder, otherwise transcoder index + 1 */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Tranascoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
   4976 
/*
 * Switch the FDI TX/RX pair for this crtc's pipe from a training
 * pattern to the normal (idle/active) pattern once link training has
 * completed, then wait out one idle-pattern time.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* clear-then-set keeps the field update explicit */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
   5017 
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Trains the CPU FDI TX / PCH FDI RX link for this crtc's pipe:
 * pattern 1 (bit lock) followed by pattern 2 (symbol lock), polling
 * the sticky lock bits in FDI_RX_IIR after each step. Failures are
 * logged but not propagated; the modeset continues regardless.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; the IIR bit is sticky, ack by writing it back */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock, again acking the sticky IIR bit */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
   5111 
/*
 * FDI TX voltage-swing / pre-emphasis levels tried in order during
 * SNB/IVB link training until the receiver reports lock.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
   5118 
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Like ilk_fdi_link_train(), but steps through the four
 * voltage-swing/pre-emphasis levels in snb_b_fdi_train_param[] for
 * each training pattern, retrying the sticky FDI_RX_IIR lock bits up
 * to five times per level. Failures are logged but not propagated.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level until bit lock is reported */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* sticky IIR bit: ack by writing it back */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level until symbol lock is reported */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* sticky IIR bit: ack by writing it back */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
   5251 
   5252 /* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/preemphasis entry gets two attempts */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock on training pattern 1 */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* check twice in case lock arrives between reads */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* no bit lock: retry with next vswing/preemphasis */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll up to 4 times for symbol lock on training pattern 2 */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
   5371 
/*
 * Enable the FDI PLLs for a PCH-connected pipe: first the PCH FDI RX
 * PLL (then switch its clock source from Rawclk to PCDclk), then the
 * CPU FDI TX PLL if it is not already running.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's PIPECONF BPC field into its FDI RX position */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
   5408 
/*
 * Counterpart of ilk_fdi_pll_enable(): switch FDI RX back to Rawclk,
 * then turn off the CPU FDI TX PLL followed by the PCH FDI RX PLL.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
   5438 
/*
 * Disable the CPU FDI TX and PCH FDI RX for @crtc, leaving both ends
 * parked on training pattern 1 so the next modeset can retrain the
 * link.  The PLLs are left alone (see ilk_fdi_pll_disable()).
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* keep FDI RX BPC in sync with the pipe's PIPECONF setting */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCHs use a different train-pattern field than IBX */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
   5489 
   5490 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
   5491 {
   5492 	struct drm_crtc *crtc;
   5493 	bool cleanup_done;
   5494 
   5495 	drm_for_each_crtc(crtc, &dev_priv->drm) {
   5496 		struct drm_crtc_commit *commit;
   5497 		spin_lock(&crtc->commit_lock);
   5498 		commit = list_first_entry_or_null(&crtc->commit_list,
   5499 						  struct drm_crtc_commit, commit_entry);
   5500 		cleanup_done = commit ?
   5501 			try_wait_for_completion(&commit->cleanup_done) : true;
   5502 		spin_unlock(&crtc->commit_lock);
   5503 
   5504 		if (cleanup_done)
   5505 			continue;
   5506 
   5507 		drm_crtc_wait_one_vblank(crtc);
   5508 
   5509 		return true;
   5510 	}
   5511 
   5512 	return false;
   5513 }
   5514 
   5515 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
   5516 {
   5517 	u32 temp;
   5518 
   5519 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
   5520 
   5521 	mutex_lock(&dev_priv->sb_lock);
   5522 
   5523 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
   5524 	temp |= SBI_SSCCTL_DISABLE;
   5525 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
   5526 
   5527 	mutex_unlock(&dev_priv->sb_lock);
   5528 }
   5529 
   5530 /* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Start from a gated, disabled modulator before reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* split into integer divisor and phase-increment parts */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate the pixel clock now that the modulator is running. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
   5608 
   5609 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
   5610 {
   5611 	u32 divsel, phaseinc, auxdiv;
   5612 	u32 iclk_virtual_root_freq = 172800 * 1000;
   5613 	u32 iclk_pi_range = 64;
   5614 	u32 desired_divisor;
   5615 	u32 temp;
   5616 
   5617 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
   5618 		return 0;
   5619 
   5620 	mutex_lock(&dev_priv->sb_lock);
   5621 
   5622 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
   5623 	if (temp & SBI_SSCCTL_DISABLE) {
   5624 		mutex_unlock(&dev_priv->sb_lock);
   5625 		return 0;
   5626 	}
   5627 
   5628 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
   5629 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
   5630 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
   5631 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
   5632 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
   5633 
   5634 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
   5635 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
   5636 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
   5637 
   5638 	mutex_unlock(&dev_priv->sb_lock);
   5639 
   5640 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
   5641 
   5642 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
   5643 				 desired_divisor << auxdiv);
   5644 }
   5645 
/*
 * Copy the CPU transcoder's horizontal and vertical timing registers
 * into the given PCH transcoder so both ends of the FDI link run the
 * same mode timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
   5669 
   5670 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
   5671 {
   5672 	u32 temp;
   5673 
   5674 	temp = I915_READ(SOUTH_CHICKEN1);
   5675 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
   5676 		return;
   5677 
   5678 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
   5679 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
   5680 
   5681 	temp &= ~FDI_BC_BIFURCATION_SELECT;
   5682 	if (enable)
   5683 		temp |= FDI_BC_BIFURCATION_SELECT;
   5684 
   5685 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
   5686 	I915_WRITE(SOUTH_CHICKEN1, temp);
   5687 	POSTING_READ(SOUTH_CHICKEN1);
   5688 }
   5689 
   5690 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
   5691 {
   5692 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5693 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5694 
   5695 	switch (crtc->pipe) {
   5696 	case PIPE_A:
   5697 		break;
   5698 	case PIPE_B:
   5699 		if (crtc_state->fdi_lanes > 2)
   5700 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
   5701 		else
   5702 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
   5703 
   5704 		break;
   5705 	case PIPE_C:
   5706 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
   5707 
   5708 		break;
   5709 	default:
   5710 		BUG();
   5711 	}
   5712 }
   5713 
   5714 /*
   5715  * Finds the encoder associated with the given CRTC. This can only be
   5716  * used when we know that the CRTC isn't feeding multiple encoders!
   5717  */
   5718 static struct intel_encoder *
   5719 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
   5720 			   const struct intel_crtc_state *crtc_state)
   5721 {
   5722 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5723 	const struct drm_connector_state *connector_state;
   5724 	const struct drm_connector *connector;
   5725 	struct intel_encoder *encoder = NULL;
   5726 	int num_encoders = 0;
   5727 	int i;
   5728 
   5729 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
   5730 		if (connector_state->crtc != &crtc->base)
   5731 			continue;
   5732 
   5733 		encoder = to_intel_encoder(connector_state->best_encoder);
   5734 		num_encoders++;
   5735 	}
   5736 
   5737 	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
   5738 	     num_encoders, pipe_name(crtc->pipe));
   5739 
   5740 	return encoder;
   5741 }
   5742 
   5743 /*
   5744  * Enable PCH resources required for PCH ports:
   5745  *   - PCH PLLs
   5746  *   - FDI training & RX/TX
   5747  *   - update transcoder timings
   5748  *   - DP transcoding bits
   5749  *   - transcoder
   5750  */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB may need the FDI B/C lane bifurcation changed first */
	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* select DPLL B or A depending on the shared DPLL in use */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		/* PCH DP only exists on ports B through D */
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}
   5834 
   5835 static void lpt_pch_enable(const struct intel_atomic_state *state,
   5836 			   const struct intel_crtc_state *crtc_state)
   5837 {
   5838 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5839 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5840 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   5841 
   5842 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
   5843 
   5844 	lpt_program_iclkip(crtc_state);
   5845 
   5846 	/* Set transcoder timing. */
   5847 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
   5848 
   5849 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
   5850 }
   5851 
   5852 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
   5853 			       enum pipe pipe)
   5854 {
   5855 	i915_reg_t dslreg = PIPEDSL(pipe);
   5856 	u32 temp;
   5857 
   5858 	temp = I915_READ(dslreg);
   5859 	udelay(500);
   5860 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
   5861 		if (wait_for(I915_READ(dslreg) != temp, 5))
   5862 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
   5863 	}
   5864 }
   5865 
   5866 /*
   5867  * The hardware phase 0.0 refers to the center of the pixel.
   5868  * We want to start from the top/left edge which is phase
   5869  * -0.5. That matches how the hardware calculates the scaling
   5870  * factors (from top-left of the first pixel to bottom-right
   5871  * of the last pixel, as opposed to the pixel centers).
   5872  *
   5873  * For 4:2:0 subsampled chroma planes we obviously have to
   5874  * adjust that so that the chroma sample position lands in
   5875  * the right spot.
   5876  *
   5877  * Note that for packed YCbCr 4:2:2 formats there is no way to
   5878  * control chroma siting. The hardware simply replicates the
   5879  * chroma samples for both of the luma samples, and thus we don't
   5880  * actually get the expected MPEG2 chroma siting convention :(
   5881  * The same behaviour is observed on pre-SKL platforms as well.
   5882  *
   5883  * Theory behind the formula (note that we ignore sub-pixel
   5884  * source coordinates):
   5885  * s = source sample position
   5886  * d = destination sample position
   5887  *
   5888  * Downscaling 4:1:
   5889  * -0.5
   5890  * | 0.0
   5891  * | |     1.5 (initial phase)
   5892  * | |     |
   5893  * v v     v
   5894  * | s | s | s | s |
   5895  * |       d       |
   5896  *
   5897  * Upscaling 1:4:
   5898  * -0.5
   5899  * | -0.375 (initial phase)
   5900  * | |     0.0
   5901  * | |     |
   5902  * v v     v
   5903  * |       s       |
   5904  * | d | d | d | d |
   5905  */
   5906 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
   5907 {
   5908 	int phase = -0x8000;
   5909 	u16 trip = 0;
   5910 
   5911 	if (chroma_cosited)
   5912 		phase += (sub - 1) * 0x8000 / sub;
   5913 
   5914 	phase += scale / (2 * sub);
   5915 
   5916 	/*
   5917 	 * Hardware initial phase limited to [-0.5:1.5].
   5918 	 * Since the max hardware scale factor is 3.0, we
   5919 	 * should never actually excdeed 1.0 here.
   5920 	 */
   5921 	WARN_ON(phase < -0x8000 || phase > 0x18000);
   5922 
   5923 	if (phase < 0)
   5924 		phase = 0x10000 + phase;
   5925 	else
   5926 		trip = PS_PHASE_TRIP;
   5927 
   5928 	return ((phase >> 2) & PS_PHASE_MASK) | trip;
   5929 }
   5930 
/* Pipe/plane scaler source and destination size limits, in pixels. */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
/* Gen11+ (ICL) permits larger scaler windows. */
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
/* Minimum source size when scaling planar (4:2:0) YUV. */
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
   5945 
/*
 * Stage a scaler allocation or release for @scaler_user (a plane index
 * or SKL_CRTC_INDEX) in @crtc_state.  Decides whether scaling is
 * needed for the src/dst rectangle, validates it against hardware
 * limits, and updates scaler_state->scaler_users.  Only the staged
 * atomic state changes here; scaler registers are programmed later.
 * Returns 0 on success or -EINVAL when the request is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have a larger minimum size requirement. */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
   6035 
   6036 /**
   6037  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
   6038  *
   6039  * @state: crtc's scaler state
   6040  *
   6041  * Return
   6042  *     0 - scaler_usage updated successfully
   6043  *    error - requested scaling cannot be supported or other error condition
   6044  */
   6045 int skl_update_scaler_crtc(struct intel_crtc_state *state)
   6046 {
   6047 	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
   6048 	bool need_scaler = false;
   6049 
   6050 	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
   6051 		need_scaler = true;
   6052 
   6053 	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
   6054 				 &state->scaler_state.scaler_id,
   6055 				 state->pipe_src_w, state->pipe_src_h,
   6056 				 adjusted_mode->crtc_hdisplay,
   6057 				 adjusted_mode->crtc_vdisplay, NULL, 0,
   6058 				 need_scaler);
   6059 }
   6060 
   6061 /**
   6062  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
   6063  * @crtc_state: crtc's scaler state
   6064  * @plane_state: atomic plane state to update
   6065  *
   6066  * Return
   6067  *     0 - scaler_usage updated successfully
   6068  *    error - requested scaling cannot be supported or other error condition
   6069  */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	/* detach the scaler when the plane has no fb or is invisible */
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	/* nothing more to validate when no scaler was staged */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* FP16 formats are only scalable on gen11+ */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
   6150 
   6151 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
   6152 {
   6153 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   6154 	int i;
   6155 
   6156 	for (i = 0; i < crtc->num_scalers; i++)
   6157 		skl_detach_scaler(crtc, i);
   6158 }
   6159 
/*
 * skl_pfit_enable - program the SKL+ panel fitter via a pipe scaler
 * @crtc_state: new state of the CRTC being enabled
 *
 * On SKL+ panel fitting is done with one of the pipe scalers; the scaler
 * to use was reserved earlier in crtc_state->scaler_state.scaler_id.
 * No-op unless pch_pfit.enabled is set.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* A scaler must have been assigned during atomic check. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in the high 16 bits, height low. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* Scale factors in .16 fixed point (source / destination). */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
   6196 
   6197 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
   6198 {
   6199 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   6200 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6201 	enum pipe pipe = crtc->pipe;
   6202 
   6203 	if (crtc_state->pch_pfit.enabled) {
   6204 		/* Force use of hard-coded filter coefficients
   6205 		 * as some pre-programmed values are broken,
   6206 		 * e.g. x201.
   6207 		 */
   6208 		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
   6209 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
   6210 						 PF_PIPE_SEL_IVB(pipe));
   6211 		else
   6212 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
   6213 		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
   6214 		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
   6215 	}
   6216 }
   6217 
/*
 * hsw_enable_ips - enable Intermediate Pixel Storage (IPS) on HSW/BDW
 * @crtc_state: new state of the CRTC; no-op unless ->ips_enabled
 *
 * Called from the post plane update path, i.e. after a plane enable and
 * a vblank wait. On BDW the enable goes through the pcode mailbox; on
 * HSW we write IPS_CTL directly and wait for the enable bit to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
   6253 
/*
 * hsw_disable_ips - disable Intermediate Pixel Storage on HSW/BDW
 * @crtc_state: old state of the CRTC; no-op unless ->ips_enabled
 *
 * Mirror of hsw_enable_ips(): BDW disables via the pcode mailbox and
 * polls IPS_CTL for completion, HSW clears IPS_CTL directly. Always
 * ends with a vblank wait so planes can be disabled safely afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
   6280 
   6281 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
   6282 {
   6283 	if (intel_crtc->overlay)
   6284 		(void) intel_overlay_switch_off(intel_crtc->overlay);
   6285 
   6286 	/* Let userspace switch the overlay on again. In most cases userspace
   6287 	 * has to recompute where to put it anyway.
   6288 	 */
   6289 }
   6290 
   6291 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
   6292 				       const struct intel_crtc_state *new_crtc_state)
   6293 {
   6294 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   6295 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6296 
   6297 	if (!old_crtc_state->ips_enabled)
   6298 		return false;
   6299 
   6300 	if (needs_modeset(new_crtc_state))
   6301 		return true;
   6302 
   6303 	/*
   6304 	 * Workaround : Do not read or write the pipe palette/gamma data while
   6305 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
   6306 	 *
   6307 	 * Disable IPS before we program the LUT.
   6308 	 */
   6309 	if (IS_HASWELL(dev_priv) &&
   6310 	    (new_crtc_state->uapi.color_mgmt_changed ||
   6311 	     new_crtc_state->update_pipe) &&
   6312 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
   6313 		return true;
   6314 
   6315 	return !new_crtc_state->ips_enabled;
   6316 }
   6317 
   6318 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
   6319 				       const struct intel_crtc_state *new_crtc_state)
   6320 {
   6321 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   6322 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6323 
   6324 	if (!new_crtc_state->ips_enabled)
   6325 		return false;
   6326 
   6327 	if (needs_modeset(new_crtc_state))
   6328 		return true;
   6329 
   6330 	/*
   6331 	 * Workaround : Do not read or write the pipe palette/gamma data while
   6332 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
   6333 	 *
   6334 	 * Re-enable IPS after the LUT has been programmed.
   6335 	 */
   6336 	if (IS_HASWELL(dev_priv) &&
   6337 	    (new_crtc_state->uapi.color_mgmt_changed ||
   6338 	     new_crtc_state->update_pipe) &&
   6339 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
   6340 		return true;
   6341 
   6342 	/*
   6343 	 * We can't read out IPS on broadwell, assume the worst and
   6344 	 * forcibly enable IPS on the first fastset.
   6345 	 */
   6346 	if (new_crtc_state->update_pipe &&
   6347 	    old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
   6348 		return true;
   6349 
   6350 	return !old_crtc_state->ips_enabled;
   6351 }
   6352 
   6353 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
   6354 {
   6355 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   6356 
   6357 	if (!crtc_state->nv12_planes)
   6358 		return false;
   6359 
   6360 	/* WA Display #0827: Gen9:all */
   6361 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
   6362 		return true;
   6363 
   6364 	return false;
   6365 }
   6366 
   6367 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
   6368 {
   6369 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   6370 
   6371 	/* Wa_2006604312:icl */
   6372 	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
   6373 		return true;
   6374 
   6375 	return false;
   6376 }
   6377 
   6378 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
   6379 			    const struct intel_crtc_state *new_crtc_state)
   6380 {
   6381 	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
   6382 		new_crtc_state->active_planes;
   6383 }
   6384 
   6385 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
   6386 			     const struct intel_crtc_state *new_crtc_state)
   6387 {
   6388 	return old_crtc_state->active_planes &&
   6389 		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
   6390 }
   6391 
/*
 * intel_post_plane_update - per-CRTC work after the plane update is committed
 * @state: the atomic commit being applied
 * @crtc: the CRTC that was updated
 *
 * Runs after the plane/pipe registers have been written and the vblank has
 * passed: flips the frontbuffer tracking bits, programs the optimal
 * watermarks, re-enables IPS, runs FBC post-update, and turns off the
 * NV12 (WA #0827) and ICL scaler clock (Wa_2006604312) workarounds when
 * the new state no longer needs them.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	/* FBC post-update only when the primary plane was in this commit. */
	if (new_primary_state)
		intel_fbc_post_update(crtc);

	/* Display WA #0827: disable once no NV12 planes remain. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl: disable once no scalers remain in use. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
   6424 
/*
 * intel_pre_plane_update - per-CRTC work before the plane update is committed
 * @state: the atomic commit being applied
 * @crtc: the CRTC about to be updated
 *
 * Runs before the plane/pipe registers are written: disables IPS when
 * required, runs FBC pre-update, enables the NV12 (WA #0827) and ICL
 * scaler clock (Wa_2006604312) workarounds when the new state needs them,
 * disables self-refresh/LP watermarks where required (with vblank waits),
 * programs intermediate watermarks, and suppresses Gen2 underrun
 * reporting before all planes go off. The ordering of these steps is
 * deliberate; see the individual comments.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may request a vblank wait before the update. */
	if (new_primary_state &&
	    intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
   6515 
   6516 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
   6517 				      struct intel_crtc *crtc)
   6518 {
   6519 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6520 	const struct intel_crtc_state *new_crtc_state =
   6521 		intel_atomic_get_new_crtc_state(state, crtc);
   6522 	unsigned int update_mask = new_crtc_state->update_planes;
   6523 	const struct intel_plane_state *old_plane_state;
   6524 	struct intel_plane *plane;
   6525 	unsigned fb_bits = 0;
   6526 	int i;
   6527 
   6528 	intel_crtc_dpms_overlay_disable(crtc);
   6529 
   6530 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
   6531 		if (crtc->pipe != plane->pipe ||
   6532 		    !(update_mask & BIT(plane->id)))
   6533 			continue;
   6534 
   6535 		intel_disable_plane(plane, new_crtc_state);
   6536 
   6537 		if (old_plane_state->uapi.visible)
   6538 			fb_bits |= plane->frontbuffer_bit;
   6539 	}
   6540 
   6541 	intel_frontbuffer_flip(dev_priv, fb_bits);
   6542 }
   6543 
   6544 /*
   6545  * intel_connector_primary_encoder - get the primary encoder for a connector
   6546  * @connector: connector for which to return the encoder
   6547  *
   6548  * Returns the primary encoder for a connector. There is a 1:1 mapping from
   6549  * all connectors to their encoder, except for DP-MST connectors which have
   6550  * both a virtual and a primary encoder. These DP-MST primary encoders can be
   6551  * pointed to by as many DP-MST connectors as there are pipes.
   6552  */
   6553 static struct intel_encoder *
   6554 intel_connector_primary_encoder(struct intel_connector *connector)
   6555 {
   6556 	struct intel_encoder *encoder;
   6557 
   6558 	if (connector->mst_port)
   6559 		return &dp_to_dig_port(connector->mst_port)->base;
   6560 
   6561 	encoder = intel_attached_encoder(connector);
   6562 	WARN_ON(!encoder);
   6563 
   6564 	return encoder;
   6565 }
   6566 
   6567 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
   6568 {
   6569 	struct drm_connector_state *new_conn_state;
   6570 	struct drm_connector *connector;
   6571 	int i;
   6572 
   6573 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
   6574 					i) {
   6575 		struct intel_connector *intel_connector;
   6576 		struct intel_encoder *encoder;
   6577 		struct intel_crtc *crtc;
   6578 
   6579 		if (!intel_connector_needs_modeset(state, connector))
   6580 			continue;
   6581 
   6582 		intel_connector = to_intel_connector(connector);
   6583 		encoder = intel_connector_primary_encoder(intel_connector);
   6584 		if (!encoder->update_prepare)
   6585 			continue;
   6586 
   6587 		crtc = new_conn_state->crtc ?
   6588 			to_intel_crtc(new_conn_state->crtc) : NULL;
   6589 		encoder->update_prepare(state, encoder, crtc);
   6590 	}
   6591 }
   6592 
   6593 static void intel_encoders_update_complete(struct intel_atomic_state *state)
   6594 {
   6595 	struct drm_connector_state *new_conn_state;
   6596 	struct drm_connector *connector;
   6597 	int i;
   6598 
   6599 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
   6600 					i) {
   6601 		struct intel_connector *intel_connector;
   6602 		struct intel_encoder *encoder;
   6603 		struct intel_crtc *crtc;
   6604 
   6605 		if (!intel_connector_needs_modeset(state, connector))
   6606 			continue;
   6607 
   6608 		intel_connector = to_intel_connector(connector);
   6609 		encoder = intel_connector_primary_encoder(intel_connector);
   6610 		if (!encoder->update_complete)
   6611 			continue;
   6612 
   6613 		crtc = new_conn_state->crtc ?
   6614 			to_intel_crtc(new_conn_state->crtc) : NULL;
   6615 		encoder->update_complete(state, encoder, crtc);
   6616 	}
   6617 }
   6618 
   6619 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
   6620 					  struct intel_crtc *crtc)
   6621 {
   6622 	const struct intel_crtc_state *crtc_state =
   6623 		intel_atomic_get_new_crtc_state(state, crtc);
   6624 	const struct drm_connector_state *conn_state;
   6625 	struct drm_connector *conn;
   6626 	int i;
   6627 
   6628 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6629 		struct intel_encoder *encoder =
   6630 			to_intel_encoder(conn_state->best_encoder);
   6631 
   6632 		if (conn_state->crtc != &crtc->base)
   6633 			continue;
   6634 
   6635 		if (encoder->pre_pll_enable)
   6636 			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
   6637 	}
   6638 }
   6639 
   6640 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
   6641 				      struct intel_crtc *crtc)
   6642 {
   6643 	const struct intel_crtc_state *crtc_state =
   6644 		intel_atomic_get_new_crtc_state(state, crtc);
   6645 	const struct drm_connector_state *conn_state;
   6646 	struct drm_connector *conn;
   6647 	int i;
   6648 
   6649 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6650 		struct intel_encoder *encoder =
   6651 			to_intel_encoder(conn_state->best_encoder);
   6652 
   6653 		if (conn_state->crtc != &crtc->base)
   6654 			continue;
   6655 
   6656 		if (encoder->pre_enable)
   6657 			encoder->pre_enable(encoder, crtc_state, conn_state);
   6658 	}
   6659 }
   6660 
   6661 static void intel_encoders_enable(struct intel_atomic_state *state,
   6662 				  struct intel_crtc *crtc)
   6663 {
   6664 	const struct intel_crtc_state *crtc_state =
   6665 		intel_atomic_get_new_crtc_state(state, crtc);
   6666 	const struct drm_connector_state *conn_state;
   6667 	struct drm_connector *conn;
   6668 	int i;
   6669 
   6670 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6671 		struct intel_encoder *encoder =
   6672 			to_intel_encoder(conn_state->best_encoder);
   6673 
   6674 		if (conn_state->crtc != &crtc->base)
   6675 			continue;
   6676 
   6677 		if (encoder->enable)
   6678 			encoder->enable(encoder, crtc_state, conn_state);
   6679 		intel_opregion_notify_encoder(encoder, true);
   6680 	}
   6681 }
   6682 
   6683 static void intel_encoders_disable(struct intel_atomic_state *state,
   6684 				   struct intel_crtc *crtc)
   6685 {
   6686 	const struct intel_crtc_state *old_crtc_state =
   6687 		intel_atomic_get_old_crtc_state(state, crtc);
   6688 	const struct drm_connector_state *old_conn_state;
   6689 	struct drm_connector *conn;
   6690 	int i;
   6691 
   6692 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   6693 		struct intel_encoder *encoder =
   6694 			to_intel_encoder(old_conn_state->best_encoder);
   6695 
   6696 		if (old_conn_state->crtc != &crtc->base)
   6697 			continue;
   6698 
   6699 		intel_opregion_notify_encoder(encoder, false);
   6700 		if (encoder->disable)
   6701 			encoder->disable(encoder, old_crtc_state, old_conn_state);
   6702 	}
   6703 }
   6704 
   6705 static void intel_encoders_post_disable(struct intel_atomic_state *state,
   6706 					struct intel_crtc *crtc)
   6707 {
   6708 	const struct intel_crtc_state *old_crtc_state =
   6709 		intel_atomic_get_old_crtc_state(state, crtc);
   6710 	const struct drm_connector_state *old_conn_state;
   6711 	struct drm_connector *conn;
   6712 	int i;
   6713 
   6714 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   6715 		struct intel_encoder *encoder =
   6716 			to_intel_encoder(old_conn_state->best_encoder);
   6717 
   6718 		if (old_conn_state->crtc != &crtc->base)
   6719 			continue;
   6720 
   6721 		if (encoder->post_disable)
   6722 			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
   6723 	}
   6724 }
   6725 
   6726 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
   6727 					    struct intel_crtc *crtc)
   6728 {
   6729 	const struct intel_crtc_state *old_crtc_state =
   6730 		intel_atomic_get_old_crtc_state(state, crtc);
   6731 	const struct drm_connector_state *old_conn_state;
   6732 	struct drm_connector *conn;
   6733 	int i;
   6734 
   6735 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   6736 		struct intel_encoder *encoder =
   6737 			to_intel_encoder(old_conn_state->best_encoder);
   6738 
   6739 		if (old_conn_state->crtc != &crtc->base)
   6740 			continue;
   6741 
   6742 		if (encoder->post_pll_disable)
   6743 			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
   6744 	}
   6745 }
   6746 
   6747 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
   6748 				       struct intel_crtc *crtc)
   6749 {
   6750 	const struct intel_crtc_state *crtc_state =
   6751 		intel_atomic_get_new_crtc_state(state, crtc);
   6752 	const struct drm_connector_state *conn_state;
   6753 	struct drm_connector *conn;
   6754 	int i;
   6755 
   6756 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6757 		struct intel_encoder *encoder =
   6758 			to_intel_encoder(conn_state->best_encoder);
   6759 
   6760 		if (conn_state->crtc != &crtc->base)
   6761 			continue;
   6762 
   6763 		if (encoder->update_pipe)
   6764 			encoder->update_pipe(encoder, crtc_state, conn_state);
   6765 	}
   6766 }
   6767 
   6768 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
   6769 {
   6770 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   6771 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
   6772 
   6773 	plane->disable_plane(plane, crtc_state);
   6774 }
   6775 
/*
 * ilk_crtc_enable - full modeset enable sequence for ILK-style platforms
 * @state: the atomic commit being applied
 * @crtc: the CRTC being enabled
 *
 * Brings up the pipe in the hardware-mandated order: DPLL preparation,
 * timings/source size, FDI M/N, pipeconf, encoder pre-enable, FDI PLL,
 * panel fitter, LUT, pipe enable, PCH enable, and finally encoder enable.
 * CPU and PCH FIFO underrun reporting is suppressed for the duration to
 * hide known-spurious underruns, and re-enabled at the end after the
 * required vblank waits.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
   6867 
   6868 /* IPS only exists on ULT machines and is tied to pipe A. */
   6869 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
   6870 {
   6871 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
   6872 }
   6873 
   6874 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
   6875 					    enum pipe pipe, bool apply)
   6876 {
   6877 	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
   6878 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
   6879 
   6880 	if (apply)
   6881 		val |= mask;
   6882 	else
   6883 		val &= ~mask;
   6884 
   6885 	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
   6886 }
   6887 
   6888 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
   6889 {
   6890 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6891 	enum pipe pipe = crtc->pipe;
   6892 	u32 val;
   6893 
   6894 	val = MBUS_DBOX_A_CREDIT(2);
   6895 
   6896 	if (INTEL_GEN(dev_priv) >= 12) {
   6897 		val |= MBUS_DBOX_BW_CREDIT(2);
   6898 		val |= MBUS_DBOX_B_CREDIT(12);
   6899 	} else {
   6900 		val |= MBUS_DBOX_BW_CREDIT(1);
   6901 		val |= MBUS_DBOX_B_CREDIT(8);
   6902 	}
   6903 
   6904 	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
   6905 }
   6906 
   6907 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
   6908 {
   6909 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   6910 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6911 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
   6912 	u32 val;
   6913 
   6914 	val = I915_READ(reg);
   6915 	val &= ~HSW_FRAME_START_DELAY_MASK;
   6916 	val |= HSW_FRAME_START_DELAY(0);
   6917 	I915_WRITE(reg, val);
   6918 }
   6919 
/*
 * Enable a pipe on HSW+ (DDI) platforms: shared DPLL, transcoder timings,
 * panel fitter, LUTs, watermarks, pipe, and finally the encoders, in the
 * hardware-mandated order.  No-op (with a WARN) if the crtc is already
 * active.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (WARN_ON(crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	/* NOTE(review): timings/pipeconf are skipped for DSI transcoders;
	 * presumably handled by the DSI encoder hooks. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_enable_trans_port_sync(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	/* PIPE_MULT takes the pixel multiplier minus one. */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		lpt_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* WA #1180 above: wait a vblank before restoring clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
   7027 
   7028 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
   7029 {
   7030 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   7031 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7032 	enum pipe pipe = crtc->pipe;
   7033 
   7034 	/* To avoid upsetting the power well on haswell only disable the pfit if
   7035 	 * it's in use. The hw state code will make sure we get this right. */
   7036 	if (old_crtc_state->pch_pfit.enabled) {
   7037 		I915_WRITE(PF_CTL(pipe), 0);
   7038 		I915_WRITE(PF_WIN_POS(pipe), 0);
   7039 		I915_WRITE(PF_WIN_SZ(pipe), 0);
   7040 	}
   7041 }
   7042 
/*
 * Disable an ILK-family crtc: encoders, vblank, pipe, panel fitter, FDI
 * and (when a PCH encoder is present) the PCH transcoder, in that order.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		/* CPT PCH has per-transcoder DP control and DPLL routing
		 * that must be cleared as well. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Restore the underrun reporting suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
   7099 
/*
 * Disable a HSW+ (DDI) crtc.  Only the encoder disable/post_disable
 * hooks are invoked here; see the FIXME below.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
   7110 
/*
 * Program and enable the GMCH panel fitter from the precomputed
 * gmch_pfit state.  No-op when the state carries no pfit control bits.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be written before the control register. */
	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
   7133 
   7134 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
   7135 {
   7136 	if (phy == PHY_NONE)
   7137 		return false;
   7138 
   7139 	if (IS_ELKHARTLAKE(dev_priv))
   7140 		return phy <= PHY_C;
   7141 
   7142 	if (INTEL_GEN(dev_priv) >= 11)
   7143 		return phy <= PHY_B;
   7144 
   7145 	return false;
   7146 }
   7147 
   7148 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
   7149 {
   7150 	if (INTEL_GEN(dev_priv) >= 12)
   7151 		return phy >= PHY_D && phy <= PHY_I;
   7152 
   7153 	if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
   7154 		return phy >= PHY_C && phy <= PHY_F;
   7155 
   7156 	return false;
   7157 }
   7158 
   7159 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
   7160 {
   7161 	if (IS_ELKHARTLAKE(i915) && port == PORT_D)
   7162 		return PHY_A;
   7163 
   7164 	return (enum phy)port;
   7165 }
   7166 
   7167 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
   7168 {
   7169 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
   7170 		return PORT_TC_NONE;
   7171 
   7172 	if (INTEL_GEN(dev_priv) >= 12)
   7173 		return port - PORT_D;
   7174 
   7175 	return port - PORT_C;
   7176 }
   7177 
/*
 * Map a DDI port to the power domain that feeds its lanes.  Unknown
 * ports are flagged via MISSING_CASE and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
   7200 
/*
 * Map a digital port's AUX channel to its power domain.  Type-C ports
 * operating in Thunderbolt-alt mode use the dedicated *_TBT domains;
 * everything else uses the regular AUX domains.  Unknown channels are
 * flagged via MISSING_CASE and get a fallback domain.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
   7246 
   7247 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
   7248 {
   7249 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   7250 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7251 	struct drm_encoder *encoder;
   7252 	enum pipe pipe = crtc->pipe;
   7253 	u64 mask;
   7254 	enum transcoder transcoder = crtc_state->cpu_transcoder;
   7255 
   7256 	if (!crtc_state->hw.active)
   7257 		return 0;
   7258 
   7259 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
   7260 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
   7261 	if (crtc_state->pch_pfit.enabled ||
   7262 	    crtc_state->pch_pfit.force_thru)
   7263 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
   7264 
   7265 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
   7266 				  crtc_state->uapi.encoder_mask) {
   7267 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
   7268 
   7269 		mask |= BIT_ULL(intel_encoder->power_domain);
   7270 	}
   7271 
   7272 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
   7273 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
   7274 
   7275 	if (crtc_state->shared_dpll)
   7276 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
   7277 
   7278 	return mask;
   7279 }
   7280 
   7281 static u64
   7282 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
   7283 {
   7284 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   7285 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7286 	enum intel_display_power_domain domain;
   7287 	u64 domains, new_domains, old_domains;
   7288 
   7289 	old_domains = crtc->enabled_power_domains;
   7290 	crtc->enabled_power_domains = new_domains =
   7291 		get_crtc_power_domains(crtc_state);
   7292 
   7293 	domains = new_domains & ~old_domains;
   7294 
   7295 	for_each_power_domain(domain, domains)
   7296 		intel_display_power_get(dev_priv, domain);
   7297 
   7298 	return old_domains & ~new_domains;
   7299 }
   7300 
   7301 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
   7302 				      u64 domains)
   7303 {
   7304 	enum intel_display_power_domain domain;
   7305 
   7306 	for_each_power_domain(domain, domains)
   7307 		intel_display_power_put_unchecked(dev_priv, domain);
   7308 }
   7309 
/*
 * Enable a VLV/CHV crtc: timings, pipeconf, DPLL, panel fitter, LUTs,
 * watermarks, pipe and encoders, in the hardware-mandated order.
 * No-op (with a WARN) if the crtc is already active.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: select legacy blending and a zeroed constant-alpha
	 * canvas (see CHV_BLEND_LEGACY). */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	/* CHV and VLV have distinct PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
   7364 
   7365 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
   7366 {
   7367 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   7368 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7369 
   7370 	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
   7371 	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
   7372 }
   7373 
/*
 * Enable a gen2-4 (GMCH) crtc: PLL dividers, timings, pipeconf, PLL,
 * panel fitter, LUTs, watermarks, pipe and encoders, in order.
 * No-op (with a WARN) if the crtc is already active.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 is skipped here; see the matching guard in i9xx_crtc_disable() */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
   7421 
/*
 * Disable the GMCH panel fitter.  No-op when the old state carried no
 * pfit control bits; requires the pipe to already be disabled.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
   7436 
/*
 * Disable a gen2-4 (GMCH) crtc: encoders, vblank, pipe, panel fitter
 * and finally the PLL (unless DSI owns it), in order.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps the DPLL running; everything else shuts it down. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* gen2 has no CPU fifo underrun reporting to turn off here. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
   7483 
/*
 * Force a crtc off outside of a normal atomic commit (e.g. during hw
 * state sanitization): disable its planes, run the platform crtc
 * disable hook via a throwaway atomic state, then scrub all software
 * tracking (crtc state, encoders, watermarks, DPLL, power domains,
 * cdclk/voltage and bandwidth bookkeeping).
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off every visible plane before touching the pipe. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Throwaway state solely to feed the crtc_disable hook. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the software crtc state to "off". */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Release every power domain this crtc held. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(pipe);
	dev_priv->min_cdclk[pipe] = 0;
	dev_priv->min_voltage_level[pipe] = 0;

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
   7563 
   7564 /*
   7565  * turn all crtc's off, but do not adjust state
   7566  * This has to be paired with a call to intel_modeset_setup_hw_state.
   7567  */
   7568 int intel_display_suspend(struct drm_device *dev)
   7569 {
   7570 	struct drm_i915_private *dev_priv = to_i915(dev);
   7571 	struct drm_atomic_state *state;
   7572 	int ret;
   7573 
   7574 	state = drm_atomic_helper_suspend(dev);
   7575 	ret = PTR_ERR_OR_ZERO(state);
   7576 	if (ret)
   7577 		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
   7578 	else
   7579 		dev_priv->modeset_restore_state = state;
   7580 	return ret;
   7581 }
   7582 
/* Generic encoder destructor: tear down the drm encoder and free it. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
   7590 
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency): an enabled connector must have an
 * active crtc and matching encoder links; a disabled one must not.
 * @crtc_state may be NULL for a connector with no attached crtc.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to a crtc, not to a connector,
		 * so the encoder checks below don't apply. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
   7629 
   7630 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
   7631 {
   7632 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
   7633 		return crtc_state->fdi_lanes;
   7634 
   7635 	return 0;
   7636 }
   7637 
/*
 * Validate the FDI lane count for @pipe against the platform limits and
 * against the lanes consumed by the other pipes sharing the FDI links.
 * Returns 0 when the config fits, -EINVAL when it can't, or an error
 * from acquiring the other crtc's state (e.g. -EDEADLK).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe platforms have no FDI lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B requires pipe C to use no FDI lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C shares lanes with pipe B; B may use at most 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
   7709 
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config.  When the
 * lane check fails with -EINVAL, the pipe bpp is reduced in steps of
 * one bit per channel (2*3) down to 6 bpc and the computation retried;
 * in that case RETRY (1) is returned so the caller recomputes the whole
 * config with the reduced bpp.  -EDEADLK is propagated unchanged.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
   7758 
   7759 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
   7760 {
   7761 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   7762 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7763 
   7764 	/* IPS only exists on ULT machines and is tied to pipe A. */
   7765 	if (!hsw_crtc_supports_ips(crtc))
   7766 		return false;
   7767 
   7768 	if (!i915_modparams.enable_ips)
   7769 		return false;
   7770 
   7771 	if (crtc_state->pipe_bpp > 24)
   7772 		return false;
   7773 
   7774 	/*
   7775 	 * We compare against max which means we must take
   7776 	 * the increased cdclk requirement into account when
   7777 	 * calculating the new cdclk.
   7778 	 *
   7779 	 * Should measure whether using a lower cdclk w/o IPS
   7780 	 */
   7781 	if (IS_BROADWELL(dev_priv) &&
   7782 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
   7783 		return false;
   7784 
   7785 	return true;
   7786 }
   7787 
/*
 * Decide whether IPS should actually be enabled for this crtc state,
 * on top of the basic capability check in hsw_crtc_state_ips_capable().
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->uapi.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
   7818 
   7819 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
   7820 {
   7821 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7822 
   7823 	/* GDG double wide on either pipe, otherwise pipe A only */
   7824 	return INTEL_GEN(dev_priv) < 4 &&
   7825 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
   7826 }
   7827 
/*
 * Compute the effective pipe pixel rate for ILK-style (PCH) platforms.
 *
 * Starts from the adjusted mode's dotclock; when the PCH panel fitter
 * downscales (pipe source area larger than the pfit window), the rate
 * is scaled up by the pipe/pfit area ratio.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	u32 pixel_rate;

	pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		u64 pipe_w, pipe_h, pfit_w, pfit_h;
		u32 pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		/* pfit window: width in the high 16 bits, height in the low 16 */
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* a zero-sized pfit window would divide by zero below */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		/* scale by area ratio; done in 64 bits to avoid overflow */
		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
   7862 
   7863 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
   7864 {
   7865 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   7866 
   7867 	if (HAS_GMCH(dev_priv))
   7868 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
   7869 		crtc_state->pixel_rate =
   7870 			crtc_state->hw.adjusted_mode.crtc_clock;
   7871 	else
   7872 		crtc_state->pixel_rate =
   7873 			ilk_pipe_pixel_rate(crtc_state);
   7874 }
   7875 
/*
 * Validate and finalize basic pipe-level configuration: dotclock limit
 * (enabling double wide mode on pre-gen4 where needed), YCbCr-vs-CTM
 * conflicts, pipe source width parity constraints, the Cantiga+ hsync
 * front porch workaround, and the derived pixel rate.
 *
 * Returns 0 on success or -EINVAL if the mode cannot be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* single wide pipes are limited to 90% of cdclk */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders also need the FDI link configuration computed */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
   7949 
   7950 static void
   7951 intel_reduce_m_n_ratio(u32 *num, u32 *den)
   7952 {
   7953 	while (*num > DATA_LINK_M_N_MASK ||
   7954 	       *den > DATA_LINK_M_N_MASK) {
   7955 		*num >>= 1;
   7956 		*den >>= 1;
   7957 	}
   7958 }
   7959 
   7960 static void compute_m_n(unsigned int m, unsigned int n,
   7961 			u32 *ret_m, u32 *ret_n,
   7962 			bool constant_n)
   7963 {
   7964 	/*
   7965 	 * Several DP dongles in particular seem to be fussy about
   7966 	 * too large link M/N values. Give N value as 0x8000 that
   7967 	 * should be acceptable by specific devices. 0x8000 is the
   7968 	 * specified fixed N value for asynchronous clock mode,
   7969 	 * which the devices expect also in synchronous clock mode.
   7970 	 */
   7971 	if (constant_n)
   7972 		*ret_n = 0x8000;
   7973 	else
   7974 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
   7975 
   7976 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
   7977 	intel_reduce_m_n_ratio(ret_m, ret_n);
   7978 }
   7979 
/*
 * Compute the GMCH (data) and link M/N values for a DP/FDI link.
 *
 * gmch_m/gmch_n encode the ratio of the (optionally FEC-adjusted) data
 * rate to the total link bandwidth; link_m/link_n encode the pixel
 * clock vs the link clock. The transfer unit size is fixed at 64.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	/* FEC adds link overhead, so the effective data rate goes up */
	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
   8001 
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed into PCH_DREF_CONTROL, preferring the BIOS state.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
				      enableddisabled(bios_lvds_use_ssc),
				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
   8022 
   8023 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
   8024 {
   8025 	if (i915_modparams.panel_use_ssc >= 0)
   8026 		return i915_modparams.panel_use_ssc != 0;
   8027 	return dev_priv->vbt.lvds_use_ssc
   8028 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
   8029 }
   8030 
   8031 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
   8032 {
   8033 	return (1 << dpll->n) << 16 | dpll->m2;
   8034 }
   8035 
   8036 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
   8037 {
   8038 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
   8039 }
   8040 
   8041 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
   8042 				     struct intel_crtc_state *crtc_state,
   8043 				     struct dpll *reduced_clock)
   8044 {
   8045 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   8046 	u32 fp, fp2 = 0;
   8047 
   8048 	if (IS_PINEVIEW(dev_priv)) {
   8049 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
   8050 		if (reduced_clock)
   8051 			fp2 = pnv_dpll_compute_fp(reduced_clock);
   8052 	} else {
   8053 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
   8054 		if (reduced_clock)
   8055 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
   8056 	}
   8057 
   8058 	crtc_state->dpll_hw_state.fp0 = fp;
   8059 
   8060 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
   8061 	    reduced_clock) {
   8062 		crtc_state->dpll_hw_state.fp1 = fp2;
   8063 	} else {
   8064 		crtc_state->dpll_hw_state.fp1 = fp;
   8065 	}
   8066 }
   8067 
/*
 * Work around PLLB's opamp always calibrating to the max value of 0x3f
 * by force-enabling it and programming a reasonable value via DPIO.
 * NOTE(review): this looks like a fixed write sequence from hardware
 * workaround notes — preserve the ordering.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* clear the low byte again after the calibration value took effect */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
   8096 
/*
 * Program the PCH transcoder data/link M1/N1 registers for the crtc's
 * pipe from the given M/N values (TU size is folded into DATA_M1).
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
   8109 
   8110 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
   8111 				 enum transcoder transcoder)
   8112 {
   8113 	if (IS_HASWELL(dev_priv))
   8114 		return transcoder == TRANSCODER_EDP;
   8115 
   8116 	/*
   8117 	 * Strictly speaking some registers are available before
   8118 	 * gen7, but we only support DRRS on gen7+
   8119 	 */
   8120 	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
   8121 }
   8122 
/*
 * Program the CPU transcoder (gen5+) or per-pipe (g4x) data/link M/N
 * registers. The optional M2/N2 set is written only when DRRS is in
 * use and the transcoder actually has those registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* pre-gen5: M/N registers are per pipe, not per transcoder */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
   8156 
/*
 * Program the DP link M/N registers, selecting between the primary
 * (M1_N1) and DRRS downclock (M2_N2) divider sets.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n regardless of
	 * which divider set was selected above — confirm M2_N2 is never
	 * requested for PCH encoders.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
   8181 
   8182 static void vlv_compute_dpll(struct intel_crtc *crtc,
   8183 			     struct intel_crtc_state *pipe_config)
   8184 {
   8185 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
   8186 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
   8187 	if (crtc->pipe != PIPE_A)
   8188 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
   8189 
   8190 	/* DPLL not used with DSI, but still need the rest set up */
   8191 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
   8192 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
   8193 			DPLL_EXT_BUFFER_ENABLE_VLV;
   8194 
   8195 	pipe_config->dpll_hw_state.dpll_md =
   8196 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
   8197 }
   8198 
   8199 static void chv_compute_dpll(struct intel_crtc *crtc,
   8200 			     struct intel_crtc_state *pipe_config)
   8201 {
   8202 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
   8203 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
   8204 	if (crtc->pipe != PIPE_A)
   8205 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
   8206 
   8207 	/* DPLL not used with DSI, but still need the rest set up */
   8208 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
   8209 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
   8210 
   8211 	pipe_config->dpll_hw_state.dpll_md =
   8212 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
   8213 }
   8214 
/*
 * Program the VLV DPIO PHY (dividers, loop filter, clock source
 * selection) for the pipe's PLL before it is enabled. Only the refclk
 * is enabled in the DPLL register here; the VCO is enabled later by
 * vlv_enable_pll(). Magic values follow the eDP/HDMI DPIO vbios notes.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* dividers first, then the same value with calibration enabled */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
   8314 
/*
 * Program the CHV DPIO PHY (P/N/M dividers, fractional M2, lock detect
 * threshold, loop filter, AFC recal) for the pipe's PLL before it is
 * enabled. Only refclk/SSC are enabled in the DPLL register here; the
 * VCO is enabled later by chv_enable_pll().
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn __unused, bestm1 __unused, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 carries a 22-bit fractional part below the integer bits */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are selected by the VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
   8419 
   8420 /**
   8421  * vlv_force_pll_on - forcibly enable just the PLL
   8422  * @dev_priv: i915 private structure
   8423  * @pipe: pipe PLL to enable
   8424  * @dpll: PLL configuration
   8425  *
   8426  * Enable the PLL for @pipe using the supplied @dpll config. To be used
   8427  * in cases where we need the PLL enabled even when @pipe is not going to
   8428  * be enabled.
   8429  */
   8430 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
   8431 		     const struct dpll *dpll)
   8432 {
   8433 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   8434 	struct intel_crtc_state *pipe_config;
   8435 
   8436 	pipe_config = intel_crtc_state_alloc(crtc);
   8437 	if (!pipe_config)
   8438 		return -ENOMEM;
   8439 
   8440 	pipe_config->cpu_transcoder = (enum transcoder)pipe;
   8441 	pipe_config->pixel_multiplier = 1;
   8442 	pipe_config->dpll = *dpll;
   8443 
   8444 	if (IS_CHERRYVIEW(dev_priv)) {
   8445 		chv_compute_dpll(crtc, pipe_config);
   8446 		chv_prepare_pll(crtc, pipe_config);
   8447 		chv_enable_pll(crtc, pipe_config);
   8448 	} else {
   8449 		vlv_compute_dpll(crtc, pipe_config);
   8450 		vlv_prepare_pll(crtc, pipe_config);
   8451 		vlv_enable_pll(crtc, pipe_config);
   8452 	}
   8453 
   8454 	kfree(pipe_config);
   8455 
   8456 	return 0;
   8457 }
   8458 
   8459 /**
   8460  * vlv_force_pll_off - forcibly disable just the PLL
   8461  * @dev_priv: i915 private structure
   8462  * @pipe: pipe PLL to disable
   8463  *
   8464  * Disable the PLL for @pipe. To be used in cases where we need
   8465  * the PLL enabled even when @pipe is not going to be enabled.
   8466  */
   8467 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
   8468 {
   8469 	if (IS_CHERRYVIEW(dev_priv))
   8470 		chv_disable_pll(dev_priv, pipe);
   8471 	else
   8472 		vlv_disable_pll(dev_priv, pipe);
   8473 }
   8474 
/*
 * Assemble the DPLL (and, on gen4+, DPLL_MD) register values for
 * gen3/4-style PLLs from the pre-computed dividers in crtc_state->dpll,
 * and program FP0/FP1 via i9xx_update_pll_dividers().
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* SDVO pixel multiplier for platforms that encode it in DPLL */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock selection: TV, SSC, or the default DREFCLK */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carries the pixel multiplier in DPLL_MD instead */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
   8547 
/*
 * Assemble the DPLL register value for gen2-style PLLs from the
 * pre-computed dividers in crtc_state->dpll, and program FP0/FP1 via
 * i9xx_update_pll_dividers().
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* P1/P2 encoding differs between LVDS and the other outputs */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to 1 in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
   8597 
/*
 * Write the pipe/transcoder timing registers (H/V total, blank, sync,
 * plus VSYNCSHIFT) from the adjusted mode. All register fields use an
 * N-1 encoding. Interlaced modes get their vertical totals reduced
 * since the hardware adds the two halflines itself.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
   8659 
/*
 * Program PIPESRC with the user-requested source size (the size that
 * is scaled from). The register encodes width-1 in the high half and
 * height-1 in the low half.
 */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((crtc_state->pipe_src_w - 1) << 16) |
		   (crtc_state->pipe_src_h - 1));
}
   8673 
   8674 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
   8675 {
   8676 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   8677 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   8678 
   8679 	if (IS_GEN(dev_priv, 2))
   8680 		return false;
   8681 
   8682 	if (INTEL_GEN(dev_priv) >= 9 ||
   8683 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
   8684 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
   8685 	else
   8686 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
   8687 }
   8688 
   8689 static void intel_get_pipe_timings(struct intel_crtc *crtc,
   8690 				   struct intel_crtc_state *pipe_config)
   8691 {
   8692 	struct drm_device *dev = crtc->base.dev;
   8693 	struct drm_i915_private *dev_priv = to_i915(dev);
   8694 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
   8695 	u32 tmp;
   8696 
   8697 	tmp = I915_READ(HTOTAL(cpu_transcoder));
   8698 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
   8699 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
   8700 
   8701 	if (!transcoder_is_dsi(cpu_transcoder)) {
   8702 		tmp = I915_READ(HBLANK(cpu_transcoder));
   8703 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
   8704 							(tmp & 0xffff) + 1;
   8705 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
   8706 						((tmp >> 16) & 0xffff) + 1;
   8707 	}
   8708 	tmp = I915_READ(HSYNC(cpu_transcoder));
   8709 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
   8710 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
   8711 
   8712 	tmp = I915_READ(VTOTAL(cpu_transcoder));
   8713 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
   8714 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
   8715 
   8716 	if (!transcoder_is_dsi(cpu_transcoder)) {
   8717 		tmp = I915_READ(VBLANK(cpu_transcoder));
   8718 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
   8719 							(tmp & 0xffff) + 1;
   8720 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
   8721 						((tmp >> 16) & 0xffff) + 1;
   8722 	}
   8723 	tmp = I915_READ(VSYNC(cpu_transcoder));
   8724 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
   8725 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
   8726 
   8727 	if (intel_pipe_is_interlaced(pipe_config)) {
   8728 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
   8729 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
   8730 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
   8731 	}
   8732 }
   8733 
   8734 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
   8735 				    struct intel_crtc_state *pipe_config)
   8736 {
   8737 	struct drm_device *dev = crtc->base.dev;
   8738 	struct drm_i915_private *dev_priv = to_i915(dev);
   8739 	u32 tmp;
   8740 
   8741 	tmp = I915_READ(PIPESRC(crtc->pipe));
   8742 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
   8743 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
   8744 
   8745 	pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
   8746 	pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
   8747 }
   8748 
   8749 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
   8750 				 struct intel_crtc_state *pipe_config)
   8751 {
   8752 	mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
   8753 	mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
   8754 	mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
   8755 	mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
   8756 
   8757 	mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
   8758 	mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
   8759 	mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
   8760 	mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
   8761 
   8762 	mode->flags = pipe_config->hw.adjusted_mode.flags;
   8763 	mode->type = DRM_MODE_TYPE_DRIVER;
   8764 
   8765 	mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
   8766 
   8767 	mode->hsync = drm_mode_hsync(mode);
   8768 	mode->vrefresh = drm_mode_vrefresh(mode);
   8769 	drm_mode_set_name(mode);
   8770 }
   8771 
   8772 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
   8773 {
   8774 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   8775 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   8776 	u32 pipeconf;
   8777 
   8778 	pipeconf = 0;
   8779 
   8780 	/* we keep both pipes enabled on 830 */
   8781 	if (IS_I830(dev_priv))
   8782 		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
   8783 
   8784 	if (crtc_state->double_wide)
   8785 		pipeconf |= PIPECONF_DOUBLE_WIDE;
   8786 
   8787 	/* only g4x and later have fancy bpc/dither controls */
   8788 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   8789 	    IS_CHERRYVIEW(dev_priv)) {
   8790 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
   8791 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
   8792 			pipeconf |= PIPECONF_DITHER_EN |
   8793 				    PIPECONF_DITHER_TYPE_SP;
   8794 
   8795 		switch (crtc_state->pipe_bpp) {
   8796 		case 18:
   8797 			pipeconf |= PIPECONF_6BPC;
   8798 			break;
   8799 		case 24:
   8800 			pipeconf |= PIPECONF_8BPC;
   8801 			break;
   8802 		case 30:
   8803 			pipeconf |= PIPECONF_10BPC;
   8804 			break;
   8805 		default:
   8806 			/* Case prevented by intel_choose_pipe_bpp_dither. */
   8807 			BUG();
   8808 		}
   8809 	}
   8810 
   8811 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
   8812 		if (INTEL_GEN(dev_priv) < 4 ||
   8813 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
   8814 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
   8815 		else
   8816 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
   8817 	} else {
   8818 		pipeconf |= PIPECONF_PROGRESSIVE;
   8819 	}
   8820 
   8821 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
   8822 	     crtc_state->limited_color_range)
   8823 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
   8824 
   8825 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
   8826 
   8827 	pipeconf |= PIPECONF_FRAME_START_DELAY(0);
   8828 
   8829 	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
   8830 	POSTING_READ(PIPECONF(crtc->pipe));
   8831 }
   8832 
   8833 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
   8834 				   struct intel_crtc_state *crtc_state)
   8835 {
   8836 	struct drm_device *dev = crtc->base.dev;
   8837 	struct drm_i915_private *dev_priv = to_i915(dev);
   8838 	const struct intel_limit *limit;
   8839 	int refclk = 48000;
   8840 
   8841 	memset(&crtc_state->dpll_hw_state, 0,
   8842 	       sizeof(crtc_state->dpll_hw_state));
   8843 
   8844 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8845 		if (intel_panel_use_ssc(dev_priv)) {
   8846 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8847 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8848 		}
   8849 
   8850 		limit = &intel_limits_i8xx_lvds;
   8851 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
   8852 		limit = &intel_limits_i8xx_dvo;
   8853 	} else {
   8854 		limit = &intel_limits_i8xx_dac;
   8855 	}
   8856 
   8857 	if (!crtc_state->clock_set &&
   8858 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8859 				 refclk, NULL, &crtc_state->dpll)) {
   8860 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8861 		return -EINVAL;
   8862 	}
   8863 
   8864 	i8xx_compute_dpll(crtc, crtc_state, NULL);
   8865 
   8866 	return 0;
   8867 }
   8868 
   8869 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
   8870 				  struct intel_crtc_state *crtc_state)
   8871 {
   8872 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   8873 	const struct intel_limit *limit;
   8874 	int refclk = 96000;
   8875 
   8876 	memset(&crtc_state->dpll_hw_state, 0,
   8877 	       sizeof(crtc_state->dpll_hw_state));
   8878 
   8879 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8880 		if (intel_panel_use_ssc(dev_priv)) {
   8881 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8882 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8883 		}
   8884 
   8885 		if (intel_is_dual_link_lvds(dev_priv))
   8886 			limit = &intel_limits_g4x_dual_channel_lvds;
   8887 		else
   8888 			limit = &intel_limits_g4x_single_channel_lvds;
   8889 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
   8890 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
   8891 		limit = &intel_limits_g4x_hdmi;
   8892 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
   8893 		limit = &intel_limits_g4x_sdvo;
   8894 	} else {
   8895 		/* The option is for other outputs */
   8896 		limit = &intel_limits_i9xx_sdvo;
   8897 	}
   8898 
   8899 	if (!crtc_state->clock_set &&
   8900 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8901 				refclk, NULL, &crtc_state->dpll)) {
   8902 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8903 		return -EINVAL;
   8904 	}
   8905 
   8906 	i9xx_compute_dpll(crtc, crtc_state, NULL);
   8907 
   8908 	return 0;
   8909 }
   8910 
   8911 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
   8912 				  struct intel_crtc_state *crtc_state)
   8913 {
   8914 	struct drm_device *dev = crtc->base.dev;
   8915 	struct drm_i915_private *dev_priv = to_i915(dev);
   8916 	const struct intel_limit *limit;
   8917 	int refclk = 96000;
   8918 
   8919 	memset(&crtc_state->dpll_hw_state, 0,
   8920 	       sizeof(crtc_state->dpll_hw_state));
   8921 
   8922 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8923 		if (intel_panel_use_ssc(dev_priv)) {
   8924 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8925 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8926 		}
   8927 
   8928 		limit = &pnv_limits_lvds;
   8929 	} else {
   8930 		limit = &pnv_limits_sdvo;
   8931 	}
   8932 
   8933 	if (!crtc_state->clock_set &&
   8934 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8935 				refclk, NULL, &crtc_state->dpll)) {
   8936 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8937 		return -EINVAL;
   8938 	}
   8939 
   8940 	i9xx_compute_dpll(crtc, crtc_state, NULL);
   8941 
   8942 	return 0;
   8943 }
   8944 
   8945 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
   8946 				   struct intel_crtc_state *crtc_state)
   8947 {
   8948 	struct drm_device *dev = crtc->base.dev;
   8949 	struct drm_i915_private *dev_priv = to_i915(dev);
   8950 	const struct intel_limit *limit;
   8951 	int refclk = 96000;
   8952 
   8953 	memset(&crtc_state->dpll_hw_state, 0,
   8954 	       sizeof(crtc_state->dpll_hw_state));
   8955 
   8956 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8957 		if (intel_panel_use_ssc(dev_priv)) {
   8958 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8959 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8960 		}
   8961 
   8962 		limit = &intel_limits_i9xx_lvds;
   8963 	} else {
   8964 		limit = &intel_limits_i9xx_sdvo;
   8965 	}
   8966 
   8967 	if (!crtc_state->clock_set &&
   8968 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8969 				 refclk, NULL, &crtc_state->dpll)) {
   8970 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8971 		return -EINVAL;
   8972 	}
   8973 
   8974 	i9xx_compute_dpll(crtc, crtc_state, NULL);
   8975 
   8976 	return 0;
   8977 }
   8978 
   8979 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
   8980 				  struct intel_crtc_state *crtc_state)
   8981 {
   8982 	int refclk = 100000;
   8983 	const struct intel_limit *limit = &intel_limits_chv;
   8984 
   8985 	memset(&crtc_state->dpll_hw_state, 0,
   8986 	       sizeof(crtc_state->dpll_hw_state));
   8987 
   8988 	if (!crtc_state->clock_set &&
   8989 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8990 				refclk, NULL, &crtc_state->dpll)) {
   8991 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8992 		return -EINVAL;
   8993 	}
   8994 
   8995 	chv_compute_dpll(crtc, crtc_state);
   8996 
   8997 	return 0;
   8998 }
   8999 
   9000 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
   9001 				  struct intel_crtc_state *crtc_state)
   9002 {
   9003 	int refclk = 100000;
   9004 	const struct intel_limit *limit = &intel_limits_vlv;
   9005 
   9006 	memset(&crtc_state->dpll_hw_state, 0,
   9007 	       sizeof(crtc_state->dpll_hw_state));
   9008 
   9009 	if (!crtc_state->clock_set &&
   9010 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   9011 				refclk, NULL, &crtc_state->dpll)) {
   9012 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   9013 		return -EINVAL;
   9014 	}
   9015 
   9016 	vlv_compute_dpll(crtc, crtc_state);
   9017 
   9018 	return 0;
   9019 }
   9020 
   9021 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
   9022 {
   9023 	if (IS_I830(dev_priv))
   9024 		return false;
   9025 
   9026 	return INTEL_GEN(dev_priv) >= 4 ||
   9027 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
   9028 }
   9029 
   9030 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
   9031 				 struct intel_crtc_state *pipe_config)
   9032 {
   9033 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9034 	u32 tmp;
   9035 
   9036 	if (!i9xx_has_pfit(dev_priv))
   9037 		return;
   9038 
   9039 	tmp = I915_READ(PFIT_CONTROL);
   9040 	if (!(tmp & PFIT_ENABLE))
   9041 		return;
   9042 
   9043 	/* Check whether the pfit is attached to our pipe. */
   9044 	if (INTEL_GEN(dev_priv) < 4) {
   9045 		if (crtc->pipe != PIPE_B)
   9046 			return;
   9047 	} else {
   9048 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
   9049 			return;
   9050 	}
   9051 
   9052 	pipe_config->gmch_pfit.control = tmp;
   9053 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
   9054 }
   9055 
   9056 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
   9057 			       struct intel_crtc_state *pipe_config)
   9058 {
   9059 	struct drm_device *dev = crtc->base.dev;
   9060 	struct drm_i915_private *dev_priv = to_i915(dev);
   9061 	enum pipe pipe = crtc->pipe;
   9062 	struct dpll clock;
   9063 	u32 mdiv;
   9064 	int refclk = 100000;
   9065 
   9066 	/* In case of DSI, DPLL will not be used */
   9067 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
   9068 		return;
   9069 
   9070 	vlv_dpio_get(dev_priv);
   9071 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
   9072 	vlv_dpio_put(dev_priv);
   9073 
   9074 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
   9075 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
   9076 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
   9077 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
   9078 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
   9079 
   9080 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
   9081 }
   9082 
/*
 * Read back the primary plane programming left by the firmware/BIOS and
 * describe it in @plane_config so the framebuffer can be taken over for
 * fastboot. Silently returns (leaving @plane_config untouched) when the
 * plane is disabled or the fb allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	/* offset is read for completeness but never consumed below */
	u32 val, base, offset __unused;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* nothing to take over if the plane is off */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* tiling and 180-degree rotation readout exists on gen4+ only */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* surface base address location depends on the platform generation */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* the fb dimensions are taken from the pipe source size */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
   9165 
/*
 * Read the CHV DPLL dividers back over DPIO and derive the port clock
 * for hardware state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* snapshot all divider registers within one DPIO session */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	/* m1 is either a /2 or a bypass (represented as 0) */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 integer part sits in DW0 bits 7:0, shifted above 22 frac bits */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	/* the 22-bit fractional part only counts when the frac divider is on */
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
   9199 
   9200 static enum intel_output_format
   9201 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
   9202 {
   9203 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9204 	u32 tmp;
   9205 
   9206 	tmp = I915_READ(PIPEMISC(crtc->pipe));
   9207 
   9208 	if (tmp & PIPEMISC_YUV420_ENABLE) {
   9209 		/* We support 4:2:0 in full blend mode only */
   9210 		WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
   9211 
   9212 		return INTEL_OUTPUT_FORMAT_YCBCR420;
   9213 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
   9214 		return INTEL_OUTPUT_FORMAT_YCBCR444;
   9215 	} else {
   9216 		return INTEL_OUTPUT_FORMAT_RGB;
   9217 	}
   9218 }
   9219 
   9220 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
   9221 {
   9222 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9223 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
   9224 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9225 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
   9226 	u32 tmp;
   9227 
   9228 	tmp = I915_READ(DSPCNTR(i9xx_plane));
   9229 
   9230 	if (tmp & DISPPLANE_GAMMA_ENABLE)
   9231 		crtc_state->gamma_enable = true;
   9232 
   9233 	if (!HAS_GMCH(dev_priv) &&
   9234 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
   9235 		crtc_state->csc_enable = true;
   9236 }
   9237 
/*
 * Read the full pipe configuration back from the hardware for gmch-era
 * platforms. Returns true and fills @pipe_config when the pipe is
 * powered and enabled; returns false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* the registers below are only readable while the pipe is powered */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* bpc readout only exists on g4x/vlv/chv */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* pixel multiplier readout location varies by platform */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* recover the port clock from the platform-specific DPLL state */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   9356 
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs. The desired final register value is computed
 * first; the hardware is then walked toward it one source at a time,
 * because the spread-spectrum and CPU output sources must be switched
 * carefully in sequence (with settle delays) rather than all at once.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* IBX: an external CK505 clock chip may be present (per VBT) */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* keep the SSC source alive for the DPLLs still using it */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* nothing to do if the hardware already matches the wanted state */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* the incremental walk above must have reached the computed state */
	BUG_ON(val != final);
}
   9523 
   9524 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
   9525 {
   9526 	u32 tmp;
   9527 
   9528 	tmp = I915_READ(SOUTH_CHICKEN2);
   9529 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
   9530 	I915_WRITE(SOUTH_CHICKEN2, tmp);
   9531 
   9532 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
   9533 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
   9534 		DRM_ERROR("FDI mPHY reset assert timeout\n");
   9535 
   9536 	tmp = I915_READ(SOUTH_CHICKEN2);
   9537 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
   9538 	I915_WRITE(SOUTH_CHICKEN2, tmp);
   9539 
   9540 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
   9541 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
   9542 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
   9543 }
   9544 
/*
 * WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY over the SBI_MPHY sideband port. The register
 * offsets and field values below are opaque magic prescribed by the
 * workaround; they are applied as matched pairs (0x2xxx / 0x21xx,
 * presumably one per FDI channel — TODO confirm against BSpec). Do not
 * "simplify" or reorder this sequence.
 *
 * Caller context: invoked from lpt_enable_clkout_dp() with
 * dev_priv->sb_lock held (required for intel_sbi_read/write).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
   9619 
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * The steps talk to the PCH iCLK unit over the SBI_ICLK sideband port;
 * the ordering and the udelay() are part of the documented sequence.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize inconsistent parameter combinations (and warn). */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* sb_lock serializes all sideband (SBI) accesses. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable the SSC unit but keep the clock path in bypass (PATHALT). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Release the bypass so the spread clock is used. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Finally enable the CLKOUT_DP buffer (register differs on LPT-LP). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
   9664 
/* Sequence to disable CLKOUT_DP (inverse of lpt_enable_clkout_dp()):
 * first turn off the CLKOUT_DP buffer, then — if the SSC unit is still
 * running — put the clock path into bypass (PATHALT) before disabling
 * the SSC unit itself. The ordering and the udelay() follow BSpec.
 */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Disable the CLKOUT_DP buffer (register differs on LPT-LP). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			/* Bypass the clock path before turning SSC off. */
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
   9690 
/* Map a bend amount in "steps" (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * Per-bend values programmed into the low 16 bits of SBI_SSCDIVINTPHASE
 * by lpt_bend_clkout_dp(), indexed by BEND_IDX(steps). Note each divider
 * value covers two adjacent step settings; odd multiples of 5 are
 * distinguished via SBI_SSCDITHPHASE instead (see lpt_bend_clkout_dp()).
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
   9716 
   9717 /*
   9718  * Bend CLKOUT_DP
   9719  * steps -50 to 50 inclusive, in steps of 5
   9720  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
   9721  * change in clock period = -(steps / 10) * 5.787 ps
   9722  */
   9723 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
   9724 {
   9725 	u32 tmp;
   9726 	int idx = BEND_IDX(steps);
   9727 
   9728 	if (WARN_ON(steps % 5 != 0))
   9729 		return;
   9730 
   9731 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
   9732 		return;
   9733 
   9734 	mutex_lock(&dev_priv->sb_lock);
   9735 
   9736 	if (steps % 10 != 0)
   9737 		tmp = 0xAAAAAAAB;
   9738 	else
   9739 		tmp = 0x00000000;
   9740 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
   9741 
   9742 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
   9743 	tmp &= 0xffff0000;
   9744 	tmp |= sscdivintphase[idx];
   9745 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
   9746 
   9747 	mutex_unlock(&dev_priv->sb_lock);
   9748 }
   9749 
   9750 #undef BEND_IDX
   9751 
   9752 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
   9753 {
   9754 	u32 fuse_strap = I915_READ(FUSE_STRAP);
   9755 	u32 ctl = I915_READ(SPLL_CTL);
   9756 
   9757 	if ((ctl & SPLL_PLL_ENABLE) == 0)
   9758 		return false;
   9759 
   9760 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
   9761 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
   9762 		return true;
   9763 
   9764 	if (IS_BROADWELL(dev_priv) &&
   9765 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
   9766 		return true;
   9767 
   9768 	return false;
   9769 }
   9770 
   9771 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
   9772 			       enum intel_dpll_id id)
   9773 {
   9774 	u32 fuse_strap = I915_READ(FUSE_STRAP);
   9775 	u32 ctl = I915_READ(WRPLL_CTL(id));
   9776 
   9777 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
   9778 		return false;
   9779 
   9780 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
   9781 		return true;
   9782 
   9783 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
   9784 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
   9785 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
   9786 		return true;
   9787 
   9788 	return false;
   9789 }
   9790 
   9791 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
   9792 {
   9793 	struct intel_encoder *encoder;
   9794 	bool has_fdi = false;
   9795 
   9796 	for_each_intel_encoder(&dev_priv->drm, encoder) {
   9797 		switch (encoder->type) {
   9798 		case INTEL_OUTPUT_ANALOG:
   9799 			has_fdi = true;
   9800 			break;
   9801 		default:
   9802 			break;
   9803 		}
   9804 	}
   9805 
   9806 	/*
   9807 	 * The BIOS may have decided to use the PCH SSC
   9808 	 * reference so we must not disable it until the
   9809 	 * relevant PLLs have stopped relying on it. We'll
   9810 	 * just leave the PCH SSC reference enabled in case
   9811 	 * any active PLL is using it. It will get disabled
   9812 	 * after runtime suspend if we don't have FDI.
   9813 	 *
   9814 	 * TODO: Move the whole reference clock handling
   9815 	 * to the modeset sequence proper so that we can
   9816 	 * actually enable/disable/reconfigure these things
   9817 	 * safely. To do that we need to introduce a real
   9818 	 * clock hierarchy. That would also allow us to do
   9819 	 * clock bending finally.
   9820 	 */
   9821 	dev_priv->pch_ssc_use = 0;
   9822 
   9823 	if (spll_uses_pch_ssc(dev_priv)) {
   9824 		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
   9825 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
   9826 	}
   9827 
   9828 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
   9829 		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
   9830 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
   9831 	}
   9832 
   9833 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
   9834 		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
   9835 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
   9836 	}
   9837 
   9838 	if (dev_priv->pch_ssc_use)
   9839 		return;
   9840 
   9841 	if (has_fdi) {
   9842 		lpt_bend_clkout_dp(dev_priv, 0);
   9843 		lpt_enable_clkout_dp(dev_priv, true, true);
   9844 	} else {
   9845 		lpt_disable_clkout_dp(dev_priv);
   9846 	}
   9847 }
   9848 
   9849 /*
   9850  * Initialize reference clocks when the driver loads
   9851  */
   9852 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
   9853 {
   9854 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
   9855 		ilk_init_pch_refclk(dev_priv);
   9856 	else if (HAS_PCH_LPT(dev_priv))
   9857 		lpt_init_pch_refclk(dev_priv);
   9858 }
   9859 
   9860 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
   9861 {
   9862 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9863 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9864 	enum pipe pipe = crtc->pipe;
   9865 	u32 val;
   9866 
   9867 	val = 0;
   9868 
   9869 	switch (crtc_state->pipe_bpp) {
   9870 	case 18:
   9871 		val |= PIPECONF_6BPC;
   9872 		break;
   9873 	case 24:
   9874 		val |= PIPECONF_8BPC;
   9875 		break;
   9876 	case 30:
   9877 		val |= PIPECONF_10BPC;
   9878 		break;
   9879 	case 36:
   9880 		val |= PIPECONF_12BPC;
   9881 		break;
   9882 	default:
   9883 		/* Case prevented by intel_choose_pipe_bpp_dither. */
   9884 		BUG();
   9885 	}
   9886 
   9887 	if (crtc_state->dither)
   9888 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
   9889 
   9890 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
   9891 		val |= PIPECONF_INTERLACED_ILK;
   9892 	else
   9893 		val |= PIPECONF_PROGRESSIVE;
   9894 
   9895 	/*
   9896 	 * This would end up with an odd purple hue over
   9897 	 * the entire display. Make sure we don't do it.
   9898 	 */
   9899 	WARN_ON(crtc_state->limited_color_range &&
   9900 		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
   9901 
   9902 	if (crtc_state->limited_color_range)
   9903 		val |= PIPECONF_COLOR_RANGE_SELECT;
   9904 
   9905 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
   9906 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
   9907 
   9908 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
   9909 
   9910 	val |= PIPECONF_FRAME_START_DELAY(0);
   9911 
   9912 	I915_WRITE(PIPECONF(pipe), val);
   9913 	POSTING_READ(PIPECONF(pipe));
   9914 }
   9915 
   9916 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
   9917 {
   9918 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9919 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9920 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   9921 	u32 val = 0;
   9922 
   9923 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
   9924 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
   9925 
   9926 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
   9927 		val |= PIPECONF_INTERLACED_ILK;
   9928 	else
   9929 		val |= PIPECONF_PROGRESSIVE;
   9930 
   9931 	if (IS_HASWELL(dev_priv) &&
   9932 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
   9933 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
   9934 
   9935 	I915_WRITE(PIPECONF(cpu_transcoder), val);
   9936 	POSTING_READ(PIPECONF(cpu_transcoder));
   9937 }
   9938 
   9939 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
   9940 {
   9941 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9942 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9943 	u32 val = 0;
   9944 
   9945 	switch (crtc_state->pipe_bpp) {
   9946 	case 18:
   9947 		val |= PIPEMISC_DITHER_6_BPC;
   9948 		break;
   9949 	case 24:
   9950 		val |= PIPEMISC_DITHER_8_BPC;
   9951 		break;
   9952 	case 30:
   9953 		val |= PIPEMISC_DITHER_10_BPC;
   9954 		break;
   9955 	case 36:
   9956 		val |= PIPEMISC_DITHER_12_BPC;
   9957 		break;
   9958 	default:
   9959 		MISSING_CASE(crtc_state->pipe_bpp);
   9960 		break;
   9961 	}
   9962 
   9963 	if (crtc_state->dither)
   9964 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
   9965 
   9966 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
   9967 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
   9968 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
   9969 
   9970 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
   9971 		val |= PIPEMISC_YUV420_ENABLE |
   9972 			PIPEMISC_YUV420_MODE_FULL_BLEND;
   9973 
   9974 	if (INTEL_GEN(dev_priv) >= 11 &&
   9975 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
   9976 					   BIT(PLANE_CURSOR))) == 0)
   9977 		val |= PIPEMISC_HDR_MODE_PRECISION;
   9978 
   9979 	I915_WRITE(PIPEMISC(crtc->pipe), val);
   9980 }
   9981 
   9982 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
   9983 {
   9984 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9985 	u32 tmp;
   9986 
   9987 	tmp = I915_READ(PIPEMISC(crtc->pipe));
   9988 
   9989 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
   9990 	case PIPEMISC_DITHER_6_BPC:
   9991 		return 18;
   9992 	case PIPEMISC_DITHER_8_BPC:
   9993 		return 24;
   9994 	case PIPEMISC_DITHER_10_BPC:
   9995 		return 30;
   9996 	case PIPEMISC_DITHER_12_BPC:
   9997 		return 36;
   9998 	default:
   9999 		MISSING_CASE(tmp);
   10000 		return 0;
   10001 	}
   10002 }
   10003 
/*
 * Compute the number of FDI lanes required to carry target_clock pixels
 * at the given bpp over a link running at link_bw (the division by
 * link_bw * 8 converts the padded bit rate into a lane count).
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}
   10014 
/*
 * Decide whether FP_CB_TUNE should be set for this DPLL configuration:
 * true when the computed M divider falls below factor * N.
 */
static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
   10019 
/*
 * Compute the ILK DPLL register values (DPLL, FP0, FP1) from the
 * already-computed divider state in crtc_state->dpll and store them in
 * crtc_state->dpll_hw_state. reduced_clock, if non-NULL, supplies the
 * dividers for the reduced (downclocked) FP1 setting; otherwise FP1
 * mirrors FP0.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same FP_CB_TUNE criterion as above, for the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* High speed IO clock for SDVO/HDMI/DP. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* P2 post divider encoding. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference clock: spread spectrum for SSC LVDS, DREFCLK otherwise. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
   10121 
   10122 static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
   10123 				  struct intel_crtc_state *crtc_state)
   10124 {
   10125 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   10126 	struct intel_atomic_state *state =
   10127 		to_intel_atomic_state(crtc_state->uapi.state);
   10128 	const struct intel_limit *limit;
   10129 	int refclk = 120000;
   10130 
   10131 	memset(&crtc_state->dpll_hw_state, 0,
   10132 	       sizeof(crtc_state->dpll_hw_state));
   10133 
   10134 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
   10135 	if (!crtc_state->has_pch_encoder)
   10136 		return 0;
   10137 
   10138 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   10139 		if (intel_panel_use_ssc(dev_priv)) {
   10140 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
   10141 				      dev_priv->vbt.lvds_ssc_freq);
   10142 			refclk = dev_priv->vbt.lvds_ssc_freq;
   10143 		}
   10144 
   10145 		if (intel_is_dual_link_lvds(dev_priv)) {
   10146 			if (refclk == 100000)
   10147 				limit = &ilk_limits_dual_lvds_100m;
   10148 			else
   10149 				limit = &ilk_limits_dual_lvds;
   10150 		} else {
   10151 			if (refclk == 100000)
   10152 				limit = &ilk_limits_single_lvds_100m;
   10153 			else
   10154 				limit = &ilk_limits_single_lvds;
   10155 		}
   10156 	} else {
   10157 		limit = &ilk_limits_dac;
   10158 	}
   10159 
   10160 	if (!crtc_state->clock_set &&
   10161 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   10162 				refclk, NULL, &crtc_state->dpll)) {
   10163 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   10164 		return -EINVAL;
   10165 	}
   10166 
   10167 	ilk_compute_dpll(crtc, crtc_state, NULL);
   10168 
   10169 	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
   10170 		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
   10171 			      pipe_name(crtc->pipe));
   10172 		return -EINVAL;
   10173 	}
   10174 
   10175 	return 0;
   10176 }
   10177 
   10178 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
   10179 					 struct intel_link_m_n *m_n)
   10180 {
   10181 	struct drm_device *dev = crtc->base.dev;
   10182 	struct drm_i915_private *dev_priv = to_i915(dev);
   10183 	enum pipe pipe = crtc->pipe;
   10184 
   10185 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
   10186 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
   10187 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
   10188 		& ~TU_SIZE_MASK;
   10189 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
   10190 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
   10191 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
   10192 }
   10193 
/*
 * Read back the CPU transcoder link/data M/N values into *m_n, and —
 * when m2_n2 is non-NULL and the transcoder has a second M/N set —
 * the M2/N2 values into *m2_n2. On gen < 5 the per-pipe G4X registers
 * are used instead and m2_n2 is ignored.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size lives in the high bits of the data M register. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
   10230 
   10231 void intel_dp_get_m_n(struct intel_crtc *crtc,
   10232 		      struct intel_crtc_state *pipe_config)
   10233 {
   10234 	if (pipe_config->has_pch_encoder)
   10235 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
   10236 	else
   10237 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
   10238 					     &pipe_config->dp_m_n,
   10239 					     &pipe_config->dp_m2_n2);
   10240 }
   10241 
/*
 * Read back the FDI M/N values for this pipe's CPU transcoder into
 * pipe_config->fdi_m_n. Passes NULL for m2_n2 — only the primary M/N
 * set is populated for FDI.
 */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
   10248 
   10249 static void skl_get_pfit_config(struct intel_crtc *crtc,
   10250 				struct intel_crtc_state *pipe_config)
   10251 {
   10252 	struct drm_device *dev = crtc->base.dev;
   10253 	struct drm_i915_private *dev_priv = to_i915(dev);
   10254 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
   10255 	u32 ps_ctrl = 0;
   10256 	int id = -1;
   10257 	int i;
   10258 
   10259 	/* find scaler attached to this pipe */
   10260 	for (i = 0; i < crtc->num_scalers; i++) {
   10261 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
   10262 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
   10263 			id = i;
   10264 			pipe_config->pch_pfit.enabled = true;
   10265 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
   10266 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
   10267 			scaler_state->scalers[i].in_use = true;
   10268 			break;
   10269 		}
   10270 	}
   10271 
   10272 	scaler_state->scaler_id = id;
   10273 	if (id >= 0) {
   10274 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
   10275 	} else {
   10276 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
   10277 	}
   10278 }
   10279 
/*
 * Reconstruct the initial (e.g. BIOS-programmed) framebuffer
 * configuration for the primary plane on a SKL+ pipe by reading back
 * the plane registers: pixel format, tiling/modifier, rotation, base
 * address, size and stride. On success an intel_framebuffer skeleton
 * is allocated and stored in plane_config->fb; on an unrecognized
 * tiling encoding it is freed and nothing is recorded.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset __unused, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to reconstruct if the plane is not enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The format field layout changed on ICL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling encoding into a DRM modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface base address is 4K-aligned; low bits are flags. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE holds (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units of stride_mult bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
   10410 
   10411 static void ilk_get_pfit_config(struct intel_crtc *crtc,
   10412 				struct intel_crtc_state *pipe_config)
   10413 {
   10414 	struct drm_device *dev = crtc->base.dev;
   10415 	struct drm_i915_private *dev_priv = to_i915(dev);
   10416 	u32 tmp;
   10417 
   10418 	tmp = I915_READ(PF_CTL(crtc->pipe));
   10419 
   10420 	if (tmp & PF_ENABLE) {
   10421 		pipe_config->pch_pfit.enabled = true;
   10422 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
   10423 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
   10424 
   10425 		/* We currently do not free assignements of panel fitters on
   10426 		 * ivb/hsw (since we don't use the higher upscaling modes which
   10427 		 * differentiates them) so just WARN about this case for now. */
   10428 		if (IS_GEN(dev_priv, 7)) {
   10429 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
   10430 				PF_PIPE_SEL_IVB(crtc->pipe));
   10431 		}
   10432 	}
   10433 }
   10434 
/*
 * Read out the full hardware state of an ILK-style (PCH display) pipe
 * into @pipe_config.  Returns true if the pipe is enabled.  Takes the
 * pipe power domain wakeref for the duration of the readout and drops
 * it before returning.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* On ILK the pipe->transcoder mapping is fixed 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode the per-pipe bits-per-color field. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* PCH transcoder enabled implies an FDI link and a shared DPLL. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT/PPT select the PLL via PCH_DPLL_SEL instead. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		/* Recover the pixel multiplier from the DPLL register. */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ilk_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   10552 
   10553 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
   10554 				  struct intel_crtc_state *crtc_state)
   10555 {
   10556 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   10557 	struct intel_atomic_state *state =
   10558 		to_intel_atomic_state(crtc_state->uapi.state);
   10559 
   10560 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
   10561 	    INTEL_GEN(dev_priv) >= 11) {
   10562 		struct intel_encoder *encoder =
   10563 			intel_get_crtc_new_encoder(state, crtc_state);
   10564 
   10565 		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
   10566 			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
   10567 				      pipe_name(crtc->pipe));
   10568 			return -EINVAL;
   10569 		}
   10570 	}
   10571 
   10572 	return 0;
   10573 }
   10574 
   10575 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
   10576 			    struct intel_crtc_state *pipe_config)
   10577 {
   10578 	enum intel_dpll_id id;
   10579 	u32 temp;
   10580 
   10581 	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
   10582 	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
   10583 
   10584 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
   10585 		return;
   10586 
   10587 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10588 }
   10589 
/*
 * Determine which DPLL is clocking @port on ICL+ and record it in
 * @pipe_config->icl_port_dplls, marking it as the active port PLL.
 * Combo PHY ports read the selection from ICL_DPCLKA_CFGCR0; Type-C
 * ports use either the MG PHY PLL or the TBT PLL depending on
 * DDI_CLK_SEL.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Anything else must be one of the TBT selections. */
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
   10625 
   10626 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
   10627 				enum port port,
   10628 				struct intel_crtc_state *pipe_config)
   10629 {
   10630 	enum intel_dpll_id id;
   10631 
   10632 	switch (port) {
   10633 	case PORT_A:
   10634 		id = DPLL_ID_SKL_DPLL0;
   10635 		break;
   10636 	case PORT_B:
   10637 		id = DPLL_ID_SKL_DPLL1;
   10638 		break;
   10639 	case PORT_C:
   10640 		id = DPLL_ID_SKL_DPLL2;
   10641 		break;
   10642 	default:
   10643 		DRM_ERROR("Incorrect port type\n");
   10644 		return;
   10645 	}
   10646 
   10647 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10648 }
   10649 
   10650 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
   10651 			    struct intel_crtc_state *pipe_config)
   10652 {
   10653 	enum intel_dpll_id id;
   10654 	u32 temp;
   10655 
   10656 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
   10657 	id = temp >> (port * 3 + 1);
   10658 
   10659 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
   10660 		return;
   10661 
   10662 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10663 }
   10664 
/*
 * Map the PORT_CLK_SEL selection for @port to a shared DPLL on HSW/BDW
 * and store it in @pipe_config->shared_dpll.  PORT_CLK_SEL_NONE leaves
 * the pipe without a PLL.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
   10699 
/*
 * Determine which transcoder drives @crtc and whether it is enabled.
 *
 * Handles the fixed pipe->transcoder mapping as well as the eDP and
 * (gen11+) DSI panel transcoders, which can be attached to any pipe.
 * On success the transcoder's power domain wakeref is stored in
 * @wakerefs and its bit set in @power_domain_mask; the caller is
 * responsible for releasing them.  Returns whether the transcoder
 * (and hence the pipe) is currently enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe this panel transcoder is bound to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Hand the wakeref to the caller for release after readout. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
   10798 
/*
 * BXT DSI counterpart to hsw_get_transcoder_state(): check whether one
 * of the DSI transcoders (ports A/C) is driving @crtc.  Power domain
 * wakerefs acquired along the way are recorded in @wakerefs and
 * @power_domain_mask for the caller to release.  Returns true if the
 * pipe is driven by a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only claim the transcoder if it is wired to our pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
   10853 
/*
 * Read out the DDI port and PLL state for @crtc: which port the
 * transcoder is driving, which shared DPLL clocks it (and that PLL's
 * hardware state), and — on HSW/BDW — whether the pipe uses the
 * LPT PCH transcoder via DDI E.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Dispatch to the platform-specific DDI->PLL readout. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
   10907 
   10908 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
   10909 						 enum transcoder cpu_transcoder)
   10910 {
   10911 	u32 trans_port_sync, master_select;
   10912 
   10913 	trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
   10914 
   10915 	if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
   10916 		return INVALID_TRANSCODER;
   10917 
   10918 	master_select = trans_port_sync &
   10919 			PORT_SYNC_MODE_MASTER_SELECT_MASK;
   10920 	if (master_select == 0)
   10921 		return TRANSCODER_EDP;
   10922 	else
   10923 		return master_select - 1;
   10924 }
   10925 
/*
 * Read out the transcoder port sync (synced multi-display) topology for
 * @crtc_state: its master transcoder, and the mask of transcoders that
 * name it as their master (its slaves).  A transcoder must not be both
 * a master and a slave, which the final WARN checks.
 */
static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 transcoders;
	enum transcoder cpu_transcoder;

	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
								  crtc_state->cpu_transcoder);

	transcoders = BIT(TRANSCODER_A) |
		BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) |
		BIT(TRANSCODER_D);
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t trans_wakeref;

		/* Skip transcoders whose power domain is off. */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
								   power_domain);

		if (!trans_wakeref)
			continue;

		/* A slave is one whose master readout points back at us. */
		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
		    crtc_state->cpu_transcoder)
			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
	}

	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}
   10960 
/*
 * Read out the complete hardware state of a HSW+ (DDI) pipe into
 * @pipe_config.  Returns true if the pipe is active.  Every power
 * domain wakeref taken during readout is dropped before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On BXT/GLK the pipe may instead be driven by a DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	/* Panel fitter state is only readable when its domain is powered. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	WARN_ON(power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(crtc, pipe_config);
		else
			ilk_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (INTEL_GEN(dev_priv) >= 11 &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
		icl_get_trans_port_sync_config(pipe_config);

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
   11091 
   11092 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
   11093 {
   11094 	struct drm_i915_private *dev_priv =
   11095 		to_i915(plane_state->uapi.plane->dev);
   11096 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   11097 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
   11098 	u32 base;
   11099 
   11100 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
   11101 		base = sg_dma_address(obj->mm.pages->sgl);
   11102 	else
   11103 		base = intel_plane_ggtt_offset(plane_state);
   11104 
   11105 	return base + plane_state->color_plane[0].offset;
   11106 }
   11107 
   11108 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
   11109 {
   11110 	int x = plane_state->uapi.dst.x1;
   11111 	int y = plane_state->uapi.dst.y1;
   11112 	u32 pos = 0;
   11113 
   11114 	if (x < 0) {
   11115 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
   11116 		x = -x;
   11117 	}
   11118 	pos |= x << CURSOR_X_SHIFT;
   11119 
   11120 	if (y < 0) {
   11121 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
   11122 		y = -y;
   11123 	}
   11124 	pos |= y << CURSOR_Y_SHIFT;
   11125 
   11126 	return pos;
   11127 }
   11128 
   11129 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
   11130 {
   11131 	const struct drm_mode_config *config =
   11132 		&plane_state->uapi.plane->dev->mode_config;
   11133 	int width = drm_rect_width(&plane_state->uapi.dst);
   11134 	int height = drm_rect_height(&plane_state->uapi.dst);
   11135 
   11136 	return width > 0 && width <= config->cursor_width &&
   11137 		height > 0 && height <= config->cursor_height;
   11138 }
   11139 
/*
 * Compute and validate the surface offset for a cursor plane.
 *
 * Pins the fb into the GGTT (via intel_plane_compute_gtt()), resolves
 * the src x/y into an aligned surface offset, and stores the result in
 * color_plane[0].  Cursors cannot be panned, so any residual x/y after
 * offset alignment is rejected with -EINVAL.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel for 180 degree rotation. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
   11190 
/*
 * Common atomic check for cursor planes: reject tiled framebuffers,
 * run the generic plane-state check with scaling disallowed, then
 * validate the cursor surface.  Returns 0 or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Use the unclipped src/dst rectangles, which we program to hw */
	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}
   11227 
/* Maximum cursor stride in bytes on i845/i865. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
   11235 
   11236 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
   11237 {
   11238 	u32 cntl = 0;
   11239 
   11240 	if (crtc_state->gamma_enable)
   11241 		cntl |= CURSOR_GAMMA_ENABLE;
   11242 
   11243 	return cntl;
   11244 }
   11245 
   11246 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
   11247 			   const struct intel_plane_state *plane_state)
   11248 {
   11249 	return CURSOR_ENABLE |
   11250 		CURSOR_FORMAT_ARGB |
   11251 		CURSOR_STRIDE(plane_state->color_plane[0].stride);
   11252 }
   11253 
   11254 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
   11255 {
   11256 	int width = drm_rect_width(&plane_state->uapi.dst);
   11257 
   11258 	/*
   11259 	 * 845g/865g are only limited by the width of their cursors,
   11260 	 * the height is arbitrary up to the precision of the register.
   11261 	 */
   11262 	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
   11263 }
   11264 
/*
 * i845/i865-specific cursor atomic check: the common cursor checks
 * plus the platform restrictions (width a multiple of 64, stride one
 * of 256/512/1024/2048).  Computes plane_state->ctl on success.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
   11306 
/*
 * Program (or disable, when @plane_state is NULL) the i845/i865
 * cursor.  All register writes happen under the uncore lock using the
 * _FW accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache the programmed values to skip redundant rewrites. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a single write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   11351 
/* Disable the i845-style cursor by programming it with a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
   11357 
/*
 * Read back whether the i845-style cursor is currently enabled in
 * hardware. Returns false if the pipe power domain is off (register
 * unreadable). On success *pipe is set to PIPE_A, the only pipe this
 * cursor can be on.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	/* Only grab a wakeref if the domain is already powered up. */
	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   11379 
/*
 * Maximum cursor stride in bytes: the widest supported cursor times
 * 4 bytes per pixel (all cursor modes used here are ARGB variants).
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
   11387 
   11388 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
   11389 {
   11390 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   11391 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   11392 	u32 cntl = 0;
   11393 
   11394 	if (INTEL_GEN(dev_priv) >= 11)
   11395 		return cntl;
   11396 
   11397 	if (crtc_state->gamma_enable)
   11398 		cntl = MCURSOR_GAMMA_ENABLE;
   11399 
   11400 	if (crtc_state->csc_enable)
   11401 		cntl |= MCURSOR_PIPE_CSC_ENABLE;
   11402 
   11403 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
   11404 		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
   11405 
   11406 	return cntl;
   11407 }
   11408 
   11409 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
   11410 			   const struct intel_plane_state *plane_state)
   11411 {
   11412 	struct drm_i915_private *dev_priv =
   11413 		to_i915(plane_state->uapi.plane->dev);
   11414 	u32 cntl = 0;
   11415 
   11416 	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
   11417 		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
   11418 
   11419 	switch (drm_rect_width(&plane_state->uapi.dst)) {
   11420 	case 64:
   11421 		cntl |= MCURSOR_MODE_64_ARGB_AX;
   11422 		break;
   11423 	case 128:
   11424 		cntl |= MCURSOR_MODE_128_ARGB_AX;
   11425 		break;
   11426 	case 256:
   11427 		cntl |= MCURSOR_MODE_256_ARGB_AX;
   11428 		break;
   11429 	default:
   11430 		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
   11431 		return 0;
   11432 	}
   11433 
   11434 	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
   11435 		cntl |= MCURSOR_ROTATE_180;
   11436 
   11437 	return cntl;
   11438 }
   11439 
   11440 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
   11441 {
   11442 	struct drm_i915_private *dev_priv =
   11443 		to_i915(plane_state->uapi.plane->dev);
   11444 	int width = drm_rect_width(&plane_state->uapi.dst);
   11445 	int height = drm_rect_height(&plane_state->uapi.dst);
   11446 
   11447 	if (!intel_cursor_size_ok(plane_state))
   11448 		return false;
   11449 
   11450 	/* Cursor width is limited to a few power-of-two sizes */
   11451 	switch (width) {
   11452 	case 256:
   11453 	case 128:
   11454 	case 64:
   11455 		break;
   11456 	default:
   11457 		return false;
   11458 	}
   11459 
   11460 	/*
   11461 	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
   11462 	 * height from 8 lines up to the cursor width, when the
   11463 	 * cursor is not rotated. Everything else requires square
   11464 	 * cursors.
   11465 	 */
   11466 	if (HAS_CUR_FBC(dev_priv) &&
   11467 	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
   11468 		if (height < 8 || height > width)
   11469 			return false;
   11470 	} else {
   11471 		if (height != width)
   11472 			return false;
   11473 	}
   11474 
   11475 	return true;
   11476 }
   11477 
/*
 * Validate the cursor plane state for i9xx+ hardware (dimensions,
 * stride, CHV pipe C restriction) and compute plane_state->ctl.
 * Returns 0 on success or -EINVAL (or whatever intel_check_cursor()
 * returned) on failure.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	/* Common (platform independent) cursor checks first. */
	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/* Sanity: the computed plane stride must match the fb pitch. */
	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The fb pitch must be exactly width * bytes-per-pixel. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
   11534 
/*
 * i9xx_update_cursor - write the i9xx+ per-pipe cursor registers
 * @plane: cursor plane
 * @crtc_state: state of the CRTC the cursor is on
 * @plane_state: new plane state, or NULL to disable the cursor
 *
 * With a NULL (or invisible) plane_state all register values stay 0,
 * which disables the cursor. The register write order below is
 * deliberate; see the comment in the body.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	/* The _FW register accessors require holding the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache what we wrote so future updates can take the fast path. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Position-only change; CURBASE still needed to arm it. */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   11602 
/* Disable the i9xx+ cursor by programming it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
   11608 
/*
 * Read back whether the cursor plane is enabled in hardware and which
 * pipe it is attached to. Returns false if the pipe power domain is
 * off (register unreadable).
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	ret = val & MCURSOR_MODE;

	/* Old platforms encode the owning pipe in the cursor register. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   11642 
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection
 * (31.5 MHz pixel clock, negative h/v sync).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
   11648 
   11649 struct drm_framebuffer *
   11650 intel_framebuffer_create(struct drm_i915_gem_object *obj,
   11651 			 struct drm_mode_fb_cmd2 *mode_cmd)
   11652 {
   11653 	struct intel_framebuffer *intel_fb;
   11654 	int ret;
   11655 
   11656 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
   11657 	if (!intel_fb)
   11658 		return ERR_PTR(-ENOMEM);
   11659 
   11660 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
   11661 	if (ret)
   11662 		goto err;
   11663 
   11664 	return &intel_fb->base;
   11665 
   11666 err:
   11667 	kfree(intel_fb);
   11668 	return ERR_PTR(ret);
   11669 }
   11670 
   11671 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
   11672 					struct drm_crtc *crtc)
   11673 {
   11674 	struct drm_plane *plane;
   11675 	struct drm_plane_state *plane_state;
   11676 	int ret, i;
   11677 
   11678 	ret = drm_atomic_add_affected_planes(state, crtc);
   11679 	if (ret)
   11680 		return ret;
   11681 
   11682 	for_each_new_plane_in_state(state, plane, plane_state, i) {
   11683 		if (plane_state->crtc != crtc)
   11684 			continue;
   11685 
   11686 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
   11687 		if (ret)
   11688 			return ret;
   11689 
   11690 		drm_atomic_set_fb_for_plane(plane_state, NULL);
   11691 	}
   11692 
   11693 	return 0;
   11694 }
   11695 
/*
 * intel_get_load_detect_pipe - acquire a pipe to drive @connector for
 * load-based detection
 * @connector: connector to drive
 * @old: cookie receiving the state to restore when detection is done
 * @ctx: modeset acquire context held by the caller
 *
 * Commits a temporary modeset (load_detect_mode with all planes off)
 * on either the connector's current CRTC or the first unused CRTC its
 * encoder can drive.
 *
 * Returns true on success, with old->restore_state holding the state
 * to restore via intel_release_load_detect_pipe(); false on failure;
 * or -EDEADLK when the caller must back off @ctx and retry (note the
 * int return carries both bool and errno values).
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use; drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state for the detection modeset, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current state so we can restore it later. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must propagate so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
   11850 
/*
 * intel_release_load_detect_pipe - undo a load-detect modeset
 * @connector: connector that was driven for detection
 * @old: cookie filled in by intel_get_load_detect_pipe()
 * @ctx: modeset acquire context held by the caller
 *
 * Commits the saved state from old->restore_state (a no-op if there is
 * none) and drops the reference on it. Commit errors are only logged;
 * there is nothing to recover here.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
   11873 
   11874 static int i9xx_pll_refclk(struct drm_device *dev,
   11875 			   const struct intel_crtc_state *pipe_config)
   11876 {
   11877 	struct drm_i915_private *dev_priv = to_i915(dev);
   11878 	u32 dpll = pipe_config->dpll_hw_state.dpll;
   11879 
   11880 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
   11881 		return dev_priv->vbt.lvds_ssc_freq;
   11882 	else if (HAS_PCH_SPLIT(dev_priv))
   11883 		return 120000;
   11884 	else if (!IS_GEN(dev_priv, 2))
   11885 		return 96000;
   11886 	else
   11887 		return 48000;
   11888 }
   11889 
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * by decoding the DPLL/FP register values saved in
 * pipe_config->dpll_hw_state into M/N/P divisors and recomputing the
 * port clock. Stores the result in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick FP0 or FP1 depending on which divisor set is selected. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview N is decoded via ffs() — presumably a one-hot
		 * encoding in the register; see the register definitions. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS (if any) has its own divisor encoding. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
   11979 
   11980 int intel_dotclock_calculate(int link_freq,
   11981 			     const struct intel_link_m_n *m_n)
   11982 {
   11983 	/*
   11984 	 * The calculation for the data clock is:
   11985 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
   11986 	 * But we want to avoid losing precison if possible, so:
   11987 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
   11988 	 *
   11989 	 * and the link clock is simpler:
   11990 	 * link_clock = (m * link_clock) / n
   11991 	 */
   11992 
   11993 	if (!m_n->link_n)
   11994 		return 0;
   11995 
   11996 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
   11997 }
   11998 
/*
 * Read out port_clock and a derived dotclock for an ILK-style PCH
 * pipe config: the DPLL gives port_clock, and the FDI M/N values give
 * an adjusted_mode.crtc_clock estimate.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
   12016 
/*
 * Reset @crtc_state to a known-empty default: zero the whole struct,
 * link the base uapi state to @crtc, and mark the transcoder, pipe,
 * output-format and scaler fields as invalid/unassigned.
 */
static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
				   struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}
   12031 
   12032 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
   12033 {
   12034 	struct intel_crtc_state *crtc_state;
   12035 
   12036 	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
   12037 
   12038 	if (crtc_state)
   12039 		intel_crtc_state_reset(crtc_state, crtc);
   12040 
   12041 	return crtc_state;
   12042 }
   12043 
   12044 /* Returns the currently programmed mode of the given encoder. */
   12045 struct drm_display_mode *
   12046 intel_encoder_current_mode(struct intel_encoder *encoder)
   12047 {
   12048 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
   12049 	struct intel_crtc_state *crtc_state;
   12050 	struct drm_display_mode *mode;
   12051 	struct intel_crtc *crtc;
   12052 	enum pipe pipe;
   12053 
   12054 	if (!encoder->get_hw_state(encoder, &pipe))
   12055 		return NULL;
   12056 
   12057 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   12058 
   12059 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
   12060 	if (!mode)
   12061 		return NULL;
   12062 
   12063 	crtc_state = intel_crtc_state_alloc(crtc);
   12064 	if (!crtc_state) {
   12065 		kfree(mode);
   12066 		return NULL;
   12067 	}
   12068 
   12069 	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
   12070 		kfree(crtc_state);
   12071 		kfree(mode);
   12072 		return NULL;
   12073 	}
   12074 
   12075 	encoder->get_config(encoder, crtc_state);
   12076 
   12077 	intel_mode_from_pipe_config(mode, crtc_state);
   12078 
   12079 	kfree(crtc_state);
   12080 
   12081 	return mode;
   12082 }
   12083 
/* drm_crtc_funcs.destroy callback: tear down and free the CRTC. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
   12091 
   12092 /**
   12093  * intel_wm_need_update - Check whether watermarks need updating
   12094  * @cur: current plane state
   12095  * @new: new plane state
   12096  *
   12097  * Check current plane state versus the new one to determine whether
   12098  * watermarks need to be recalculated.
   12099  *
   12100  * Returns true or false.
   12101  */
   12102 static bool intel_wm_need_update(const struct intel_plane_state *cur,
   12103 				 struct intel_plane_state *new)
   12104 {
   12105 	/* Update watermarks on tiling or size changes. */
   12106 	if (new->uapi.visible != cur->uapi.visible)
   12107 		return true;
   12108 
   12109 	if (!cur->hw.fb || !new->hw.fb)
   12110 		return false;
   12111 
   12112 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
   12113 	    cur->hw.rotation != new->hw.rotation ||
   12114 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
   12115 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
   12116 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
   12117 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
   12118 		return true;
   12119 
   12120 	return false;
   12121 }
   12122 
   12123 static bool needs_scaling(const struct intel_plane_state *state)
   12124 {
   12125 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
   12126 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
   12127 	int dst_w = drm_rect_width(&state->uapi.dst);
   12128 	int dst_h = drm_rect_height(&state->uapi.dst);
   12129 
   12130 	return (src_w != dst_w || src_h != dst_h);
   12131 }
   12132 
/*
 * intel_plane_atomic_calc_changes - compute derived CRTC state for a
 * plane update
 * @old_crtc_state: previous CRTC state
 * @crtc_state: new CRTC state (bookkeeping bits updated in place)
 * @old_plane_state: previous plane state
 * @plane_state: new plane state
 *
 * Determines the plane's visibility transition (turn on/off/update)
 * and sets the watermark, cxsr, frontbuffer and LP-watermark
 * workaround bits on @crtc_state accordingly. Also runs the skl+
 * plane scaler check. Returns 0 on success or a negative error code
 * from the scaler check.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* skl+ scaler setup for non-cursor planes. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing more to account for. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
   12255 
   12256 static bool encoders_cloneable(const struct intel_encoder *a,
   12257 			       const struct intel_encoder *b)
   12258 {
   12259 	/* masks could be asymmetric, so check both ways */
   12260 	return a == b || (a->cloneable & (1 << b->type) &&
   12261 			  b->cloneable & (1 << a->type));
   12262 }
   12263 
   12264 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
   12265 					 struct intel_crtc *crtc,
   12266 					 struct intel_encoder *encoder)
   12267 {
   12268 	struct intel_encoder *source_encoder;
   12269 	struct drm_connector *connector;
   12270 	struct drm_connector_state *connector_state;
   12271 	int i;
   12272 
   12273 	for_each_new_connector_in_state(state, connector, connector_state, i) {
   12274 		if (connector_state->crtc != &crtc->base)
   12275 			continue;
   12276 
   12277 		source_encoder =
   12278 			to_intel_encoder(connector_state->best_encoder);
   12279 		if (!encoders_cloneable(encoder, source_encoder))
   12280 			return false;
   12281 	}
   12282 
   12283 	return true;
   12284 }
   12285 
   12286 static int icl_add_linked_planes(struct intel_atomic_state *state)
   12287 {
   12288 	struct intel_plane *plane, *linked;
   12289 	struct intel_plane_state *plane_state, *linked_plane_state;
   12290 	int i;
   12291 
   12292 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   12293 		linked = plane_state->planar_linked_plane;
   12294 
   12295 		if (!linked)
   12296 			continue;
   12297 
   12298 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
   12299 		if (IS_ERR(linked_plane_state))
   12300 			return PTR_ERR(linked_plane_state);
   12301 
   12302 		WARN_ON(linked_plane_state->planar_linked_plane != plane);
   12303 		WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
   12304 	}
   12305 
   12306 	return 0;
   12307 }
   12308 
/*
 * On gen11+ a planar (NV12) plane needs a second hardware plane to scan
 * out the Y component.  Tear down any stale master/slave links on this
 * CRTC, then pair every plane in crtc_state->nv12_planes with a free
 * Y-capable plane, copying the relevant hw parameters to the slave.
 *
 * Returns 0 on success, -EINVAL if no Y plane is available, or the
 * error from acquiring a plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Nothing to pair up if no plane uses a planar format. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this CRTC to act as slave. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR-capable planes route chroma upsampling to a fixed partner. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
   12400 
   12401 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
   12402 {
   12403 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   12404 	struct intel_atomic_state *state =
   12405 		to_intel_atomic_state(new_crtc_state->uapi.state);
   12406 	const struct intel_crtc_state *old_crtc_state =
   12407 		intel_atomic_get_old_crtc_state(state, crtc);
   12408 
   12409 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
   12410 }
   12411 
   12412 static bool
   12413 intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
   12414 {
   12415 	struct drm_crtc *crtc = crtc_state->uapi.crtc;
   12416 	struct drm_atomic_state *state = crtc_state->uapi.state;
   12417 	struct drm_connector *connector;
   12418 	struct drm_connector_state *connector_state;
   12419 	int i;
   12420 
   12421 	for_each_new_connector_in_state(state, connector, connector_state, i) {
   12422 		if (connector_state->crtc != crtc)
   12423 			continue;
   12424 		if (connector->has_tile &&
   12425 		    connector->tile_h_loc == connector->num_h_tile - 1 &&
   12426 		    connector->tile_v_loc == connector->num_v_tile - 1)
   12427 			return true;
   12428 	}
   12429 
   12430 	return false;
   12431 }
   12432 
   12433 static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
   12434 {
   12435 	crtc_state->master_transcoder = INVALID_TRANSCODER;
   12436 	crtc_state->sync_mode_slaves_mask = 0;
   12437 }
   12438 
/*
 * Set up port sync (genlock) master/slave relationships for tiled DP
 * displays on gen11+.  If @connector is a slave tile, find the CRTC of
 * the master tile (last horizontal and last vertical tile in the same
 * tile group), record its transcoder in crtc_state->master_transcoder
 * and add our transcoder to the master's sync_mode_slaves_mask.
 *
 * Returns 0 on success or when no port sync setup is needed, -EINVAL
 * when the master CRTC cannot be found, or an error from acquiring a
 * connector/CRTC state.
 */
static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
					    struct intel_crtc_state *crtc_state,
					    int num_tiled_conns)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;

	/* Port sync is only handled on gen11+ and only for DP outputs. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 * If all tiles not present do not make master slave assignments.
	 */
	if (!connector->has_tile ||
	    crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
	    crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
	    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
		reset_port_sync_mode_state(crtc_state);
		return 0;
	}
	/* Last Horizontal and last vertical tile connector is a master
	 * Master's crtc state is already populated in slave for port sync
	 */
	if (connector->tile_h_loc == connector->num_h_tile - 1 &&
	    connector->tile_v_loc == connector->num_v_tile - 1)
		return 0;

	/* Loop through all connectors and configure the Slave crtc_state
	 * to point to the correct master.
	 */
	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(master_connector, &conn_iter) {
		struct drm_connector_state *master_conn_state = NULL;

		/* Only consider tiles belonging to the same tile group. */
		if (!(master_connector->has_tile &&
		      master_connector->tile_group->id == connector->tile_group->id))
			continue;
		/* The master is the last-h/last-v tile of the group. */
		if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
		    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
			continue;

		master_conn_state = drm_atomic_get_connector_state(&state->base,
								   master_connector);
		if (IS_ERR(master_conn_state)) {
			/* Must end the iterator before bailing out. */
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(master_conn_state);
		}
		if (master_conn_state->crtc) {
			master_crtc = master_conn_state->crtc;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!master_crtc) {
		DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
			      crtc->base.id);
		return -EINVAL;
	}

	master_crtc_state = drm_atomic_get_crtc_state(&state->base,
						      master_crtc);
	if (IS_ERR(master_crtc_state))
		return PTR_ERR(master_crtc_state);

	/* Link the two: slave records the master transcoder, master records us. */
	master_pipe_config = to_intel_crtc_state(master_crtc_state);
	crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
	master_pipe_config->sync_mode_slaves_mask |=
		BIT(crtc_state->cpu_transcoder);
	DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
		      transcoder_name(crtc_state->master_transcoder),
		      crtc->base.id,
		      master_pipe_config->sync_mode_slaves_mask);

	return 0;
}
   12529 
/*
 * Per-CRTC atomic check: compute clocks, color management, watermarks,
 * scalers and IPS state for the new CRTC state in @state.
 *
 * Returns 0 on success or a negative error code from any of the
 * platform hooks.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/*
	 * Pre-gen5 (except G4X): a modeset that leaves the pipe inactive
	 * needs a post-update watermark recomputation.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/*
	 * Compute new clocks on a modeset, but only if no shared DPLL has
	 * been assigned yet (it would be a bug if one already was).
	 */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	/* Optional platform hook: target ("optimal") pipe watermarks. */
	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate watermarks make no sense without target ones. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* Gen9+: update the pipe scaler and assign scaler units. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
   12603 
/*
 * Resync every connector's atomic state (crtc, best_encoder) with the
 * connector's current legacy encoder pointer, adjusting the connector
 * reference counts to match.  Presumably used after state takeover
 * (e.g. hardware readout) — TODO confirm against callers.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc assignment. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* A bound connector state holds a connector reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
   12628 
   12629 static int
   12630 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
   12631 		      struct intel_crtc_state *pipe_config)
   12632 {
   12633 	struct drm_connector *connector = conn_state->connector;
   12634 	const struct drm_display_info *info = &connector->display_info;
   12635 	int bpp;
   12636 
   12637 	switch (conn_state->max_bpc) {
   12638 	case 6 ... 7:
   12639 		bpp = 6 * 3;
   12640 		break;
   12641 	case 8 ... 9:
   12642 		bpp = 8 * 3;
   12643 		break;
   12644 	case 10 ... 11:
   12645 		bpp = 10 * 3;
   12646 		break;
   12647 	case 12:
   12648 		bpp = 12 * 3;
   12649 		break;
   12650 	default:
   12651 		return -EINVAL;
   12652 	}
   12653 
   12654 	if (bpp < pipe_config->pipe_bpp) {
   12655 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
   12656 			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
   12657 			      connector->base.id, connector->name,
   12658 			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
   12659 			      pipe_config->pipe_bpp);
   12660 
   12661 		pipe_config->pipe_bpp = bpp;
   12662 	}
   12663 
   12664 	return 0;
   12665 }
   12666 
   12667 static int
   12668 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
   12669 			  struct intel_crtc_state *pipe_config)
   12670 {
   12671 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   12672 	struct drm_atomic_state *state = pipe_config->uapi.state;
   12673 	struct drm_connector *connector;
   12674 	struct drm_connector_state *connector_state;
   12675 	int bpp, i;
   12676 
   12677 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   12678 	    IS_CHERRYVIEW(dev_priv)))
   12679 		bpp = 10*3;
   12680 	else if (INTEL_GEN(dev_priv) >= 5)
   12681 		bpp = 12*3;
   12682 	else
   12683 		bpp = 8*3;
   12684 
   12685 	pipe_config->pipe_bpp = bpp;
   12686 
   12687 	/* Clamp display bpp to connector max bpp */
   12688 	for_each_new_connector_in_state(state, connector, connector_state, i) {
   12689 		int ret;
   12690 
   12691 		if (connector_state->crtc != &crtc->base)
   12692 			continue;
   12693 
   12694 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
   12695 		if (ret)
   12696 			return ret;
   12697 	}
   12698 
   12699 	return 0;
   12700 }
   12701 
/* Dump the computed crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
   12713 
/*
 * Dump a link M/N configuration to the KMS debug log.  @id labels the
 * link (e.g. "fdi", "dp m_n").  @pipe_config is currently unused but
 * kept in the signature for all callers' convenience.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
   12724 
   12725 static void
   12726 intel_dump_infoframe(struct drm_i915_private *dev_priv,
   12727 		     const union hdmi_infoframe *frame)
   12728 {
   12729 	if (!drm_debug_enabled(DRM_UT_KMS))
   12730 		return;
   12731 
   12732 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
   12733 }
   12734 
/* Map an INTEL_OUTPUT_* enum value to its name, for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Indexed by INTEL_OUTPUT_* output type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
   12753 
/*
 * Format @output_types (a bitmask of INTEL_OUTPUT_* bits) into @buf as
 * a comma-separated list of names.  Stops early if @buf fills up; any
 * bits left over (because of truncation or unknown output types) fire
 * a one-time warning.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* "," separator only between entries, not before the first. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* snprintf returns the would-be length: r >= len means truncation. */
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* Anything still set was not printed. */
	WARN_ON_ONCE(output_types != 0);
}
   12780 
   12781 static const char * const output_format_str[] = {
   12782 	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
   12783 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
   12784 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
   12785 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
   12786 };
   12787 
   12788 static const char *output_formats(enum intel_output_format format)
   12789 {
   12790 	if (format >= ARRAY_SIZE(output_format_str))
   12791 		format = INTEL_OUTPUT_FORMAT_INVALID;
   12792 	return output_format_str[format];
   12793 }
   12794 
/*
 * Dump one plane's state (fb, format, visibility, rotation, scaler,
 * src/dst rectangles) to the KMS debug log.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* A plane without a framebuffer has nothing more to report. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	/* src/dst are only meaningful while the plane is visible. */
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
   12820 
/*
 * Dump a full CRTC configuration to the KMS debug log: outputs, modes,
 * link M/N values, infoframes, pfit, DPLL and color state, followed by
 * the state of every plane on this pipe found in @state.
 *
 * @state may be NULL, in which case the plane dump is skipped.
 * @context is a caller-supplied label appended to the first line.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has no meaningful config beyond its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* The second set of M/N values is only used for DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type only if it is enabled. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
		              pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has cgm_mode instead of csc_mode. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

	DRM_DEBUG_KMS("MST master transcoder: %s\n",
		      transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
   12934 
/*
 * Verify that no digital port is used by more than one encoder and
 * that MST and SST/HDMI are not mixed on the same port.  Uses the new
 * connector state where @state has one, falling back to the current
 * state otherwise.  Returns true if the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
   13006 
/*
 * Sync only the color blobs from the uapi state into the hw state;
 * used when the rest of the hw state does not need refreshing
 * (no modeset).
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
   13012 
/*
 * Sync the full uapi crtc state (enable/active/modes) into the hw
 * state, including the color blobs via the nomodeset helper.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
   13022 
/* Mirror the hw crtc state back into the uapi state (the reverse copy). */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Setting a mode can only fail on allocation, hence just warn. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
   13039 
/*
 * Clear @crtc_state back to a freshly-allocated state before the atomic
 * check recomputes it, while preserving the fields listed below (uapi
 * state, scaler/dpll state, crc, and on G4X/VLV/CHV the watermarks).
 * Returns 0 on success or -ENOMEM if the temporary state allocation
 * fails.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call. For all other CRTCs reset the port sync variables
	 * crtc_state->master_transcoder needs to be set to INVALID
	 */
	reset_port_sync_mode_state(saved_state);
	if (intel_atomic_is_master_connector(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite crtc_state wholesale with the preserved copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
   13086 
/*
 * Compute the full pipe configuration for the crtc behind @pipe_config:
 * sanitize sync polarity flags, derive the baseline bpp and pipe source
 * size, then give every connector/encoder on this crtc a chance to
 * adjust (or reject) the state via its ->compute_config() hook.  If the
 * crtc fixup asks for a retry (RETRY), the encoder loop is re-run once.
 * Returns 0 on success or a negative error code (-EDEADLK is passed
 * straight through from the hooks).
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i, tile_group_id = -1, num_tiled_conns = 0;
	bool retry = true;

	/* Initial 1:1 pipe -> transcoder mapping. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp purely for the debug print below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Get tile_group_id of tiled connector */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == crtc &&
		    connector->has_tile) {
			tile_group_id = connector->tile_group->id;
			break;
		}
	}

	/* Get total number of tiled connectors in state that belong to
	 * this tile group.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector->has_tile &&
		    connector->tile_group->id == tile_group_id)
			num_tiled_conns++;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
						       num_tiled_conns);
		if (ret) {
			DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
				      ret);
			return ret;
		}

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* A single retry is allowed; a second RETRY means the encoders and
	 * the crtc fixup cannot converge on a configuration. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
   13251 
/*
 * Compare two clocks (in kHz) with a small tolerance: the pair matches
 * when the difference is below roughly 5% of their sum.  Zero is treated
 * specially: it only ever matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* 100 * (delta + sum) / sum < 105  <=>  delta < 5% of sum. */
	return ((delta + sum) * 100) / sum < 105;
}
   13269 
   13270 static bool
   13271 intel_compare_m_n(unsigned int m, unsigned int n,
   13272 		  unsigned int m2, unsigned int n2,
   13273 		  bool exact)
   13274 {
   13275 	if (m == m2 && n == n2)
   13276 		return true;
   13277 
   13278 	if (exact || !m || !n || !m2 || !n2)
   13279 		return false;
   13280 
   13281 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
   13282 
   13283 	if (n > n2) {
   13284 		while (n > n2) {
   13285 			m2 <<= 1;
   13286 			n2 <<= 1;
   13287 		}
   13288 	} else if (n < n2) {
   13289 		while (n < n2) {
   13290 			m <<= 1;
   13291 			n <<= 1;
   13292 		}
   13293 	}
   13294 
   13295 	if (n != n2)
   13296 		return false;
   13297 
   13298 	return intel_fuzzy_clock_check(m, m2);
   13299 }
   13300 
   13301 static bool
   13302 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
   13303 		       const struct intel_link_m_n *m2_n2,
   13304 		       bool exact)
   13305 {
   13306 	return m_n->tu == m2_n2->tu &&
   13307 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
   13308 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
   13309 		intel_compare_m_n(m_n->link_m, m_n->link_n,
   13310 				  m2_n2->link_m, m2_n2->link_n, exact);
   13311 }
   13312 
   13313 static bool
   13314 intel_compare_infoframe(const union hdmi_infoframe *a,
   13315 			const union hdmi_infoframe *b)
   13316 {
   13317 	return memcmp(a, b, sizeof(*a)) == 0;
   13318 }
   13319 
   13320 static void
   13321 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
   13322 			       bool fastset, const char *name,
   13323 			       const union hdmi_infoframe *a,
   13324 			       const union hdmi_infoframe *b)
   13325 {
   13326 	if (fastset) {
   13327 		if (!drm_debug_enabled(DRM_UT_KMS))
   13328 			return;
   13329 
   13330 		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
   13331 		DRM_DEBUG_KMS("expected:\n");
   13332 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
   13333 		DRM_DEBUG_KMS("found:\n");
   13334 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
   13335 	} else {
   13336 		DRM_ERROR("mismatch in %s infoframe\n", name);
   13337 		DRM_ERROR("expected:\n");
   13338 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
   13339 		DRM_ERROR("found:\n");
   13340 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
   13341 	}
   13342 }
   13343 
   13344 static void __printf(4, 5)
   13345 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
   13346 		     const char *name, const char *format, ...)
   13347 {
   13348 	struct va_format vaf;
   13349 	va_list args;
   13350 
   13351 	va_start(args, format);
   13352 	vaf.fmt = format;
   13353 	vaf.va = &args;
   13354 
   13355 	if (fastset)
   13356 		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
   13357 			      crtc->base.base.id, crtc->base.name, name, &vaf);
   13358 	else
   13359 		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
   13360 			  crtc->base.base.id, crtc->base.name, name, &vaf);
   13361 
   13362 	va_end(args);
   13363 }
   13364 
   13365 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
   13366 {
   13367 	if (i915_modparams.fastboot != -1)
   13368 		return i915_modparams.fastboot;
   13369 
   13370 	/* Enable fastboot by default on Skylake and newer */
   13371 	if (INTEL_GEN(dev_priv) >= 9)
   13372 		return true;
   13373 
   13374 	/* Enable fastboot by default on VLV and CHV */
   13375 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   13376 		return true;
   13377 
   13378 	/* Disabled by default on all others */
   13379 	return false;
   13380 }
   13381 
/*
 * Compare the sw-computed crtc state (@current_config) against the
 * state read back from the hardware (@pipe_config), logging every
 * mismatch via pipe_config_mismatch()/pipe_config_infoframe_mismatch().
 * Returns true when the states agree on every checked field.  With
 * @fastset set, mismatches are logged at debug level only; without it
 * they are reported as errors.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/*
	 * fixup_inherited: the old state still carries the INHERITED flag
	 * (state taken over at driver load) while the new one does not.
	 */
	bool fixup_inherited = fastset &&
		(current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
		ret = false;
	}

/*
 * Comparison helper macros.  Each one logs a mismatch through
 * pipe_config_mismatch() and clears @ret; they are #undef'd at the end
 * of this function.
 */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	/* Field-by-field comparison starts here. */
	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Horizontal and vertical timings of the adjusted mode. */
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_I(dc3co_exitline);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	/* These fields are only compared on a full modeset check. */
	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);

	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* Shared DPLL selection and all of its programmed register state. */
	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);

	PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);

	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_I(mst_master_transcoder);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
   13738 
   13739 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
   13740 					   const struct intel_crtc_state *pipe_config)
   13741 {
   13742 	if (pipe_config->has_pch_encoder) {
   13743 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
   13744 							    &pipe_config->fdi_m_n);
   13745 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
   13746 
   13747 		/*
   13748 		 * FDI already provided one idea for the dotclock.
   13749 		 * Yell if the encoder disagrees.
   13750 		 */
   13751 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
   13752 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
   13753 		     fdi_dotclock, dotclock);
   13754 	}
   13755 }
   13756 
   13757 static void verify_wm_state(struct intel_crtc *crtc,
   13758 			    struct intel_crtc_state *new_crtc_state)
   13759 {
   13760 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   13761 	struct skl_hw_state {
   13762 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
   13763 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
   13764 		struct skl_ddb_allocation ddb;
   13765 		struct skl_pipe_wm wm;
   13766 	} *hw;
   13767 	struct skl_ddb_allocation *sw_ddb;
   13768 	struct skl_pipe_wm *sw_wm;
   13769 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
   13770 	const enum pipe pipe = crtc->pipe;
   13771 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
   13772 
   13773 	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
   13774 		return;
   13775 
   13776 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
   13777 	if (!hw)
   13778 		return;
   13779 
   13780 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
   13781 	sw_wm = &new_crtc_state->wm.skl.optimal;
   13782 
   13783 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
   13784 
   13785 	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
   13786 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
   13787 
   13788 	if (INTEL_GEN(dev_priv) >= 11 &&
   13789 	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
   13790 		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
   13791 			  sw_ddb->enabled_slices,
   13792 			  hw->ddb.enabled_slices);
   13793 
   13794 	/* planes */
   13795 	for_each_universal_plane(dev_priv, pipe, plane) {
   13796 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
   13797 
   13798 		hw_plane_wm = &hw->wm.planes[plane];
   13799 		sw_plane_wm = &sw_wm->planes[plane];
   13800 
   13801 		/* Watermarks */
   13802 		for (level = 0; level <= max_level; level++) {
   13803 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
   13804 						&sw_plane_wm->wm[level]))
   13805 				continue;
   13806 
   13807 			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13808 				  pipe_name(pipe), plane + 1, level,
   13809 				  sw_plane_wm->wm[level].plane_en,
   13810 				  sw_plane_wm->wm[level].plane_res_b,
   13811 				  sw_plane_wm->wm[level].plane_res_l,
   13812 				  hw_plane_wm->wm[level].plane_en,
   13813 				  hw_plane_wm->wm[level].plane_res_b,
   13814 				  hw_plane_wm->wm[level].plane_res_l);
   13815 		}
   13816 
   13817 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
   13818 					 &sw_plane_wm->trans_wm)) {
   13819 			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13820 				  pipe_name(pipe), plane + 1,
   13821 				  sw_plane_wm->trans_wm.plane_en,
   13822 				  sw_plane_wm->trans_wm.plane_res_b,
   13823 				  sw_plane_wm->trans_wm.plane_res_l,
   13824 				  hw_plane_wm->trans_wm.plane_en,
   13825 				  hw_plane_wm->trans_wm.plane_res_b,
   13826 				  hw_plane_wm->trans_wm.plane_res_l);
   13827 		}
   13828 
   13829 		/* DDB */
   13830 		hw_ddb_entry = &hw->ddb_y[plane];
   13831 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
   13832 
   13833 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
   13834 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
   13835 				  pipe_name(pipe), plane + 1,
   13836 				  sw_ddb_entry->start, sw_ddb_entry->end,
   13837 				  hw_ddb_entry->start, hw_ddb_entry->end);
   13838 		}
   13839 	}
   13840 
   13841 	/*
   13842 	 * cursor
   13843 	 * If the cursor plane isn't active, we may not have updated it's ddb
   13844 	 * allocation. In that case since the ddb allocation will be updated
   13845 	 * once the plane becomes visible, we can skip this check
   13846 	 */
   13847 	if (1) {
   13848 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
   13849 
   13850 		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
   13851 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
   13852 
   13853 		/* Watermarks */
   13854 		for (level = 0; level <= max_level; level++) {
   13855 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
   13856 						&sw_plane_wm->wm[level]))
   13857 				continue;
   13858 
   13859 			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13860 				  pipe_name(pipe), level,
   13861 				  sw_plane_wm->wm[level].plane_en,
   13862 				  sw_plane_wm->wm[level].plane_res_b,
   13863 				  sw_plane_wm->wm[level].plane_res_l,
   13864 				  hw_plane_wm->wm[level].plane_en,
   13865 				  hw_plane_wm->wm[level].plane_res_b,
   13866 				  hw_plane_wm->wm[level].plane_res_l);
   13867 		}
   13868 
   13869 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
   13870 					 &sw_plane_wm->trans_wm)) {
   13871 			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13872 				  pipe_name(pipe),
   13873 				  sw_plane_wm->trans_wm.plane_en,
   13874 				  sw_plane_wm->trans_wm.plane_res_b,
   13875 				  sw_plane_wm->trans_wm.plane_res_l,
   13876 				  hw_plane_wm->trans_wm.plane_en,
   13877 				  hw_plane_wm->trans_wm.plane_res_b,
   13878 				  hw_plane_wm->trans_wm.plane_res_l);
   13879 		}
   13880 
   13881 		/* DDB */
   13882 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
   13883 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
   13884 
   13885 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
   13886 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
   13887 				  pipe_name(pipe),
   13888 				  sw_ddb_entry->start, sw_ddb_entry->end,
   13889 				  hw_ddb_entry->start, hw_ddb_entry->end);
   13890 		}
   13891 	}
   13892 
   13893 	kfree(hw);
   13894 }
   13895 
/*
 * Verify the connector states in @state that are bound to @crtc (or,
 * when @crtc is NULL, those bound to no crtc at all): the connector
 * state must pass intel_connector_verify_state(), and the atomic
 * best_encoder must match the legacy connector->encoder link.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/*
		 * NOTE(review): evaluated even when crtc == NULL; this
		 * relies on &crtc->base being NULL in that case (i.e.
		 * base presumably at offset 0) so that connectors with
		 * no crtc match -- confirm against struct intel_crtc.
		 */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		/* No crtc_state to verify against for crtc-less connectors. */
		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
   13920 
/*
 * Cross-check every encoder's software state against the connectors
 * in @state and against the hardware: the legacy encoder->crtc link
 * must agree with the atomic connector bindings, and an encoder with
 * no crtc must actually be disabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some old or new connector state referenced
		 * this encoder (i.e. the state touches it at all);
		 * enabled: some new connector state uses it.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this state are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Detached encoder must also be off in hardware. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
   13969 
/*
 * Read the pipe configuration back from hardware and compare it to
 * @new_crtc_state. @old_crtc_state is consumed as scratch space for
 * the hw readout: it is destroyed, reset, and refilled here.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	bool active;

	/*
	 * Reset old_crtc_state so it can hold the hw readout, but
	 * preserve its uapi.state backpointer across the reset.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Each encoder on the crtc must agree with the crtc's state. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders add their config to the readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Full sw/hw config comparison only makes sense when active. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
   14036 
   14037 static void
   14038 intel_verify_planes(struct intel_atomic_state *state)
   14039 {
   14040 	struct intel_plane *plane;
   14041 	const struct intel_plane_state *plane_state;
   14042 	int i;
   14043 
   14044 	for_each_new_intel_plane_in_state(state, plane,
   14045 					  plane_state, i)
   14046 		assert_plane(plane, plane_state->planar_slave ||
   14047 			     plane_state->uapi.visible);
   14048 }
   14049 
/*
 * Verify one shared DPLL: its on/off software tracking against the
 * hardware state and, when @crtc is non-NULL, whether @crtc is
 * correctly present/absent in the pll's active and enabled crtc
 * masks. With @crtc == NULL only the global bookkeeping is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful on/off tracking to check. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* No crtc: only check active users vs. held references. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Compare the sw-tracked hw state against the fresh readout. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
   14104 
   14105 static void
   14106 verify_shared_dpll_state(struct intel_crtc *crtc,
   14107 			 struct intel_crtc_state *old_crtc_state,
   14108 			 struct intel_crtc_state *new_crtc_state)
   14109 {
   14110 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   14111 
   14112 	if (new_crtc_state->shared_dpll)
   14113 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
   14114 
   14115 	if (old_crtc_state->shared_dpll &&
   14116 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
   14117 		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
   14118 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
   14119 
   14120 		I915_STATE_WARN(pll->active_mask & crtc_mask,
   14121 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
   14122 				pipe_name(drm_crtc_index(&crtc->base)));
   14123 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
   14124 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
   14125 				pipe_name(drm_crtc_index(&crtc->base)));
   14126 	}
   14127 }
   14128 
   14129 static void
   14130 intel_modeset_verify_crtc(struct intel_crtc *crtc,
   14131 			  struct intel_atomic_state *state,
   14132 			  struct intel_crtc_state *old_crtc_state,
   14133 			  struct intel_crtc_state *new_crtc_state)
   14134 {
   14135 	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
   14136 		return;
   14137 
   14138 	verify_wm_state(crtc, new_crtc_state);
   14139 	verify_connector_state(state, crtc);
   14140 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
   14141 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
   14142 }
   14143 
   14144 static void
   14145 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
   14146 {
   14147 	int i;
   14148 
   14149 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
   14150 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
   14151 }
   14152 
/*
 * State checks not tied to a specific crtc: encoder bookkeeping,
 * connectors bound to no crtc, and the shared DPLLs' global
 * reference counts.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
   14161 
/*
 * Recompute the vblank timestamping constants and the crtc's
 * scanline counter offset for the active (adjusted) mode. The
 * offset is platform- and output-dependent; see the comment below.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* Fields are half the frame height when interlaced. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
   14214 
   14215 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
   14216 {
   14217 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   14218 	struct intel_crtc_state *new_crtc_state;
   14219 	struct intel_crtc *crtc;
   14220 	int i;
   14221 
   14222 	if (!dev_priv->display.crtc_compute_clock)
   14223 		return;
   14224 
   14225 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   14226 		if (!needs_modeset(new_crtc_state))
   14227 			continue;
   14228 
   14229 		intel_release_shared_dplls(state, crtc);
   14230 	}
   14231 }
   14232 
   14233 /*
   14234  * This implements the workaround described in the "notes" section of the mode
   14235  * set sequence documentation. When going from no pipes or single pipe to
   14236  * multiple pipes, and planes are enabled after the pipe, we need to wait at
   14237  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
   14238  */
   14239 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
   14240 {
   14241 	struct intel_crtc_state *crtc_state;
   14242 	struct intel_crtc *crtc;
   14243 	struct intel_crtc_state *first_crtc_state = NULL;
   14244 	struct intel_crtc_state *other_crtc_state = NULL;
   14245 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
   14246 	int i;
   14247 
   14248 	/* look at all crtc's that are going to be enabled in during modeset */
   14249 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   14250 		if (!crtc_state->hw.active ||
   14251 		    !needs_modeset(crtc_state))
   14252 			continue;
   14253 
   14254 		if (first_crtc_state) {
   14255 			other_crtc_state = crtc_state;
   14256 			break;
   14257 		} else {
   14258 			first_crtc_state = crtc_state;
   14259 			first_pipe = crtc->pipe;
   14260 		}
   14261 	}
   14262 
   14263 	/* No workaround needed? */
   14264 	if (!first_crtc_state)
   14265 		return 0;
   14266 
   14267 	/* w/a possibly needed, check how many crtc's are already enabled. */
   14268 	for_each_intel_crtc(state->base.dev, crtc) {
   14269 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
   14270 		if (IS_ERR(crtc_state))
   14271 			return PTR_ERR(crtc_state);
   14272 
   14273 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
   14274 
   14275 		if (!crtc_state->hw.active ||
   14276 		    needs_modeset(crtc_state))
   14277 			continue;
   14278 
   14279 		/* 2 or more enabled crtcs means no need for w/a */
   14280 		if (enabled_pipe != INVALID_PIPE)
   14281 			return 0;
   14282 
   14283 		enabled_pipe = crtc->pipe;
   14284 	}
   14285 
   14286 	if (enabled_pipe != INVALID_PIPE)
   14287 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
   14288 	else if (other_crtc_state)
   14289 		other_crtc_state->hsw_workaround_pipe = first_pipe;
   14290 
   14291 	return 0;
   14292 }
   14293 
/*
 * Modeset-wide checks: seed the state's global bookkeeping
 * (active_pipes, cdclk) from the device, lock global state when the
 * set of active pipes changes, recompute cdclk, drop stale DPLL
 * references, and apply the HSW plane-enable workaround.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* Fold each crtc's new active state into the global pipe mask. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* Changing which pipes are active affects global state. */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}
   14338 
   14339 /*
   14340  * Handle calculation of various watermark data at the end of the atomic check
   14341  * phase.  The code here should be run after the per-crtc and per-plane 'check'
   14342  * handlers to ensure that all derived state has been updated.
   14343  */
   14344 static int calc_watermark_data(struct intel_atomic_state *state)
   14345 {
   14346 	struct drm_device *dev = state->base.dev;
   14347 	struct drm_i915_private *dev_priv = to_i915(dev);
   14348 
   14349 	/* Is there platform-specific watermark information to calculate? */
   14350 	if (dev_priv->display.compute_global_watermarks)
   14351 		return dev_priv->display.compute_global_watermarks(state);
   14352 
   14353 	return 0;
   14354 }
   14355 
   14356 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
   14357 				     struct intel_crtc_state *new_crtc_state)
   14358 {
   14359 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
   14360 		return;
   14361 
   14362 	new_crtc_state->uapi.mode_changed = false;
   14363 	new_crtc_state->update_pipe = true;
   14364 }
   14365 
/*
 * For a fastset (no full modeset) carry over link M/N and DRRS state
 * from the old crtc state, avoiding needless link reprogramming.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
   14382 
   14383 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
   14384 					  struct intel_crtc *crtc,
   14385 					  u8 plane_ids_mask)
   14386 {
   14387 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   14388 	struct intel_plane *plane;
   14389 
   14390 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   14391 		struct intel_plane_state *plane_state;
   14392 
   14393 		if ((plane_ids_mask & BIT(plane->id)) == 0)
   14394 			continue;
   14395 
   14396 		plane_state = intel_atomic_get_plane_state(state, plane);
   14397 		if (IS_ERR(plane_state))
   14398 			return PTR_ERR(plane_state);
   14399 	}
   14400 
   14401 	return 0;
   14402 }
   14403 
   14404 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
   14405 {
   14406 	/* See {hsw,vlv,ivb}_plane_ratio() */
   14407 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
   14408 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   14409 		IS_IVYBRIDGE(dev_priv);
   14410 }
   14411 
/*
 * Run the per-plane atomic checks, then on platforms where the
 * number of active planes affects the planes' minimum cdclk
 * (see active_planes_affects_min_cdclk()) pull all affected planes
 * into the state before computing per-plane minimum cdclk.
 * *@need_modeset is OR-ed with the result of the min-cdclk
 * calculation (true when a cdclk change is required).
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state __unused;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane does not count towards the ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
   14472 
   14473 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
   14474 {
   14475 	struct intel_crtc_state *crtc_state __unused;
   14476 	struct intel_crtc *crtc;
   14477 	int i;
   14478 
   14479 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   14480 		int ret = intel_crtc_atomic_check(state, crtc);
   14481 		if (ret) {
   14482 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
   14483 					 crtc->base.base.id, crtc->base.name);
   14484 			return ret;
   14485 		}
   14486 	}
   14487 
   14488 	return 0;
   14489 }
   14490 
   14491 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
   14492 					       u8 transcoders)
   14493 {
   14494 	const struct intel_crtc_state *new_crtc_state;
   14495 	struct intel_crtc *crtc;
   14496 	int i;
   14497 
   14498 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   14499 		if (new_crtc_state->hw.enable &&
   14500 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
   14501 		    needs_modeset(new_crtc_state))
   14502 			return true;
   14503 	}
   14504 
   14505 	return false;
   14506 }
   14507 
/*
 * Add every connector of tile group @tile_grp_id -- and the crtc it
 * is bound to, plus that crtc's other connectors -- to @state,
 * forcing a modeset on those crtcs. Returns 0 on success or a
 * negative error code.
 */
static int
intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		/* Only connectors in the requested tile group. */
		if (!connector->has_tile ||
		    connector->tile_group->id != tile_grp_id)
			continue;
		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		/* break (not return) so the iterator is always ended below. */
		if (IS_ERR(conn_state)) {
			ret =  PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(&state->base,
						       conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}
		crtc_state->mode_changed = true;
		ret = drm_atomic_add_affected_connectors(&state->base,
							 conn_state->crtc);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
   14550 
   14551 static int
   14552 intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
   14553 {
   14554 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   14555 	struct drm_connector *connector;
   14556 	struct drm_connector_state *old_conn_state __unused, *new_conn_state __unused;
   14557 	int i, ret;
   14558 
   14559 	if (INTEL_GEN(dev_priv) < 11)
   14560 		return 0;
   14561 
   14562 	/* Is tiled, mark all other tiled CRTCs as needing a modeset */
   14563 	for_each_oldnew_connector_in_state(&state->base, connector,
   14564 					   old_conn_state, new_conn_state, i) {
   14565 		if (!connector->has_tile)
   14566 			continue;
   14567 		if (!intel_connector_needs_modeset(state, connector))
   14568 			continue;
   14569 
   14570 		ret = intel_modeset_all_tiles(state, connector->tile_group->id);
   14571 		if (ret)
   14572 			return ret;
   14573 	}
   14574 
   14575 	return 0;
   14576 }
   14577 
   14578 /**
   14579  * intel_atomic_check - validate state object
   14580  * @dev: drm device
   14581  * @_state: state to validate
   14582  */
   14583 static int intel_atomic_check(struct drm_device *dev,
   14584 			      struct drm_atomic_state *_state)
   14585 {
   14586 	struct drm_i915_private *dev_priv = to_i915(dev);
   14587 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
   14588 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   14589 	struct intel_crtc *crtc;
   14590 	int ret, i;
   14591 	bool any_ms = false;
   14592 
   14593 	/* Catch I915_MODE_FLAG_INHERITED */
   14594 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14595 					    new_crtc_state, i) {
   14596 		if (new_crtc_state->hw.mode.private_flags !=
   14597 		    old_crtc_state->hw.mode.private_flags)
   14598 			new_crtc_state->uapi.mode_changed = true;
   14599 	}
   14600 
   14601 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
   14602 	if (ret)
   14603 		goto fail;
   14604 
   14605 	/**
   14606 	 * This check adds all the connectors in current state that belong to
   14607 	 * the same tile group to a full modeset.
   14608 	 * This function directly sets the mode_changed to true and we also call
   14609 	 * drm_atomic_add_affected_connectors(). Hence we are not explicitly
   14610 	 * calling drm_atomic_helper_check_modeset() after this.
   14611 	 *
   14612 	 * Fixme: Handle some corner cases where one of the
   14613 	 * tiled connectors gets disconnected and tile info is lost but since it
   14614 	 * was previously synced to other conn, we need to add that to the modeset.
   14615 	 */
   14616 	ret = intel_atomic_check_tiled_conns(state);
   14617 	if (ret)
   14618 		goto fail;
   14619 
   14620 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14621 					    new_crtc_state, i) {
   14622 		if (!needs_modeset(new_crtc_state)) {
   14623 			/* Light copy */
   14624 			intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
   14625 
   14626 			continue;
   14627 		}
   14628 
   14629 		if (!new_crtc_state->uapi.enable) {
   14630 			intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
   14631 			continue;
   14632 		}
   14633 
   14634 		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
   14635 		if (ret)
   14636 			goto fail;
   14637 
   14638 		ret = intel_modeset_pipe_config(new_crtc_state);
   14639 		if (ret)
   14640 			goto fail;
   14641 
   14642 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
   14643 	}
   14644 
   14645 	/**
   14646 	 * Check if fastset is allowed by external dependencies like other
   14647 	 * pipes and transcoders.
   14648 	 *
   14649 	 * Right now it only forces a fullmodeset when the MST master
   14650 	 * transcoder did not changed but the pipe of the master transcoder
   14651 	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
   14652 	 * in case of port synced crtcs, if one of the synced crtcs
   14653 	 * needs a full modeset, all other synced crtcs should be
   14654 	 * forced a full modeset.
   14655 	 */
   14656 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   14657 		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
   14658 			continue;
   14659 
   14660 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
   14661 			enum transcoder master = new_crtc_state->mst_master_transcoder;
   14662 
   14663 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
   14664 				new_crtc_state->uapi.mode_changed = true;
   14665 				new_crtc_state->update_pipe = false;
   14666 			}
   14667 		}
   14668 
   14669 		if (is_trans_port_sync_mode(new_crtc_state)) {
   14670 			u8 trans = new_crtc_state->sync_mode_slaves_mask |
   14671 				   BIT(new_crtc_state->master_transcoder);
   14672 
   14673 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
   14674 				new_crtc_state->uapi.mode_changed = true;
   14675 				new_crtc_state->update_pipe = false;
   14676 			}
   14677 		}
   14678 	}
   14679 
   14680 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14681 					    new_crtc_state, i) {
   14682 		if (needs_modeset(new_crtc_state)) {
   14683 			any_ms = true;
   14684 			continue;
   14685 		}
   14686 
   14687 		if (!new_crtc_state->update_pipe)
   14688 			continue;
   14689 
   14690 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
   14691 	}
   14692 
   14693 	if (any_ms && !check_digital_port_conflicts(state)) {
   14694 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
   14695 		ret = EINVAL;
   14696 		goto fail;
   14697 	}
   14698 
   14699 	ret = drm_dp_mst_atomic_check(&state->base);
   14700 	if (ret)
   14701 		goto fail;
   14702 
   14703 	any_ms |= state->cdclk.force_min_cdclk_changed;
   14704 
   14705 	ret = intel_atomic_check_planes(state, &any_ms);
   14706 	if (ret)
   14707 		goto fail;
   14708 
   14709 	if (any_ms) {
   14710 		ret = intel_modeset_checks(state);
   14711 		if (ret)
   14712 			goto fail;
   14713 	} else {
   14714 		state->cdclk.logical = dev_priv->cdclk.logical;
   14715 	}
   14716 
   14717 	ret = intel_atomic_check_crtcs(state);
   14718 	if (ret)
   14719 		goto fail;
   14720 
   14721 	intel_fbc_choose_crtc(dev_priv, state);
   14722 	ret = calc_watermark_data(state);
   14723 	if (ret)
   14724 		goto fail;
   14725 
   14726 	ret = intel_bw_atomic_check(state);
   14727 	if (ret)
   14728 		goto fail;
   14729 
   14730 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14731 					    new_crtc_state, i) {
   14732 		if (!needs_modeset(new_crtc_state) &&
   14733 		    !new_crtc_state->update_pipe)
   14734 			continue;
   14735 
   14736 		intel_dump_pipe_config(new_crtc_state, state,
   14737 				       needs_modeset(new_crtc_state) ?
   14738 				       "[modeset]" : "[fastset]");
   14739 	}
   14740 
   14741 	return 0;
   14742 
   14743  fail:
   14744 	if (ret == -EDEADLK)
   14745 		return ret;
   14746 
   14747 	/*
   14748 	 * FIXME would probably be nice to know which crtc specifically
   14749 	 * caused the failure, in cases where we can pinpoint it.
   14750 	 */
   14751 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14752 					    new_crtc_state, i)
   14753 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
   14754 
   14755 	return ret;
   14756 }
   14757 
   14758 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
   14759 {
   14760 	return drm_atomic_helper_prepare_planes(state->base.dev,
   14761 						&state->base);
   14762 }
   14763 
   14764 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
   14765 {
   14766 	struct drm_device *dev = crtc->base.dev;
   14767 	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
   14768 
   14769 	if (!vblank->max_vblank_count)
   14770 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
   14771 
   14772 	return crtc->base.funcs->get_vblank_counter(&crtc->base);
   14773 }
   14774 
   14775 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
   14776 				  struct intel_crtc_state *crtc_state)
   14777 {
   14778 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   14779 
   14780 	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
   14781 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
   14782 
   14783 	if (crtc_state->has_pch_encoder) {
   14784 		enum pipe pch_transcoder =
   14785 			intel_crtc_pch_transcoder(crtc);
   14786 
   14787 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
   14788 	}
   14789 }
   14790 
/*
 * Apply pipe-level updates that are allowed without a full modeset:
 * pipe source size, panel fitter (pfit) state, and gen11+ pipe chicken
 * bits.  Called from commit_pipe_config(), i.e. between
 * intel_pipe_update_start()/end() in intel_update_crtc().
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ilk-style pfit: program the new state or tear down the old */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
   14823 
/*
 * Commit pipe-level configuration (color, scalers, PIPEMISC, fastset
 * updates) and then the watermarks.  For modesets the pipe registers
 * were already programmed during CRTC enable, so only the watermark
 * update runs in that case.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		/* skl+: drop scalers no longer used by this state */
		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
   14854 
/*
 * Commit one CRTC's new state: a full enable for modesets, otherwise a
 * fastset-style update, followed by FBC maintenance and the plane/pipe
 * register commit inside the vblank-evasion critical section.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		intel_crtc_update_active_timings(new_crtc_state);

		dev_priv->display.crtc_enable(state, crtc);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		/* Fastset: preload LUTs now if color management changed. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, old_crtc_state, new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
   14912 
   14913 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
   14914 {
   14915 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
   14916 	enum transcoder slave_transcoder;
   14917 
   14918 	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
   14919 
   14920 	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
   14921 	return intel_get_crtc_for_pipe(dev_priv,
   14922 				       (enum pipe)slave_transcoder);
   14923 }
   14924 
/*
 * Tear down a CRTC that is being disabled (or fully modeset): planes
 * first, then pipe CRC, then the pipe itself, FBC and its shared DPLL.
 * The ordering here is load-bearing; see the comment about the CRC /
 * vblank-off race below.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
   14951 
/*
 * Disable every CRTC that needs a modeset, in two passes: port-sync and
 * MST slave CRTCs first (their vblanks are masked until the master's),
 * then everything else.  'handled' records pipes disabled in pass one
 * so pass two skips them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
   14996 
   14997 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
   14998 {
   14999 	struct intel_crtc *crtc;
   15000 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   15001 	int i;
   15002 
   15003 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
   15004 		if (!new_crtc_state->hw.active)
   15005 			continue;
   15006 
   15007 		intel_update_crtc(crtc, state, old_crtc_state,
   15008 				  new_crtc_state);
   15009 	}
   15010 }
   15011 
   15012 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
   15013 					      struct intel_atomic_state *state,
   15014 					      struct intel_crtc_state *new_crtc_state)
   15015 {
   15016 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   15017 
   15018 	intel_crtc_update_active_timings(new_crtc_state);
   15019 	dev_priv->display.crtc_enable(state, crtc);
   15020 	intel_crtc_enable_pipe_crc(crtc);
   15021 }
   15022 
/*
 * Find the connector driving @crtc in the new atomic state and finish
 * its DP link training (takes DP_TP_CTL to Normal).
 *
 * NOTE(review): this assumes some connector in the state targets this
 * CRTC.  If none matches, 'conn' is dereferenced holding its last
 * iterated (or uninitialized) value — callers must guarantee a match.
 */
static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
				       struct intel_atomic_state *state)
{
	struct drm_connector *uninitialized_var(conn);
	struct drm_connector_state *conn_state;
	struct intel_dp *intel_dp;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc == &crtc->base)
			break;
	}
	intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
	intel_dp_stop_link_train(intel_dp);
}
   15038 
   15039 /*
   15040  * TODO: This is only called from port sync and it is identical to what will be
   15041  * executed again in intel_update_crtc() over port sync pipes
   15042  */
   15043 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
   15044 					   struct intel_atomic_state *state)
   15045 {
   15046 	struct intel_crtc_state *new_crtc_state =
   15047 		intel_atomic_get_new_crtc_state(state, crtc);
   15048 	struct intel_crtc_state *old_crtc_state =
   15049 		intel_atomic_get_old_crtc_state(state, crtc);
   15050 	struct intel_plane_state *new_plane_state =
   15051 		intel_atomic_get_new_plane_state(state,
   15052 						 to_intel_plane(crtc->base.primary));
   15053 	bool modeset = needs_modeset(new_crtc_state);
   15054 
   15055 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
   15056 		intel_fbc_disable(crtc);
   15057 	else if (new_plane_state)
   15058 		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
   15059 
   15060 	/* Perform vblank evasion around commit operation */
   15061 	intel_pipe_update_start(new_crtc_state);
   15062 	commit_pipe_config(state, old_crtc_state, new_crtc_state);
   15063 	skl_update_planes_on_crtc(state, crtc);
   15064 	intel_pipe_update_end(new_crtc_state);
   15065 
   15066 	/*
   15067 	 * We usually enable FIFO underrun interrupts as part of the
   15068 	 * CRTC enable sequence during modesets.  But when we inherit a
   15069 	 * valid pipe configuration from the BIOS we need to take care
   15070 	 * of enabling them on the CRTC's first fastset.
   15071 	 */
   15072 	if (new_crtc_state->update_pipe && !modeset &&
   15073 	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
   15074 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
   15075 }
   15076 
/*
 * Enable a transcoder port sync master/slave pair in the required
 * order: both CRTCs come up with DP_TP_CTL left in Idle, then the
 * slave's and finally the master's DP_TP_CTL is set to Normal, and
 * only then do the post-enable plane updates run for both.
 *
 * NOTE(review): the WARN_ONs below do not abort; if slave lookup fails,
 * slave_crtc is dereferenced anyway — confirm callers only reach this
 * with a valid sync pair.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
					       struct intel_atomic_state *state,
					       struct intel_crtc_state *old_crtc_state,
					       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
		      slave_crtc->base.name);

	/* Enable seq for slave with DP_TP_CTL left Idle until the
	 * master is ready
	 */
	intel_crtc_enable_trans_port_sync(slave_crtc,
					  state,
					  new_slave_crtc_state);

	/* Enable seq for master with DP_TP_CTL left Idle */
	intel_crtc_enable_trans_port_sync(crtc,
					  state,
					  new_crtc_state);

	/* Set Slave's DP_TP_CTL to Normal */
	intel_set_dp_tp_ctl_normal(slave_crtc,
				   state);

	/* Set Master's DP_TP_CTL To Normal */
	usleep_range(200, 400);
	intel_set_dp_tp_ctl_normal(crtc,
				   state);

	/* Now do the post crtc enable for all master and slaves */
	intel_post_crtc_enable_updates(slave_crtc,
				       state);
	intel_post_crtc_enable_updates(crtc,
				       state);
}
   15122 
/*
 * skl+ enable path.  DDB (display buffer) allocations of active pipes
 * must never overlap between successive CRTC updates, so pipes are
 * committed in an order where each pipe's new allocation is free of
 * conflicts with the allocations still recorded in entries[]:
 *   1. fastset/update-only pipes (no external dependencies),
 *   2. modeset pipes with no dependency on other pipes
 *      (port-sync pairs are enabled together here),
 *   3. remaining modeset pipes (MST slaves).
 * The second DBuf slice is enabled before and disabled after as needed.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Seed entries[] with current allocations and classify each pipe. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!needs_modeset(new_crtc_state)) {
			entries[i] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(crtc->pipe);
		} else {
			modeset_pipes |= BIT(crtc->pipe);
		}
	}

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer until no still-recorded allocation conflicts. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, num_pipes, i))
				continue;

			entries[i] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Dependent pipes (MST/port-sync slaves) come later/with master. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_slave(new_crtc_state))
			continue;

		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
						    entries, num_pipes, i));

		entries[i] = new_crtc_state->wm.skl.ddb;
		modeset_pipes &= ~BIT(pipe);

		if (is_trans_port_sync_mode(new_crtc_state)) {
			struct intel_crtc *slave_crtc;

			intel_update_trans_port_sync_crtcs(crtc, state,
							   old_crtc_state,
							   new_crtc_state);

			slave_crtc = intel_get_slave_crtc(new_crtc_state);
			/* TODO: update entries[] of slave */
			modeset_pipes &= ~BIT(slave_crtc->pipe);

		} else {
			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);
		}
	}

	/*
	 * Finally enable all pipes that needs a modeset and depends on
	 * other pipes, right now it is only MST slaves as both port sync slave
	 * and master are enabled together
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
						    entries, num_pipes, i));

		entries[i] = new_crtc_state->wm.skl.ddb;
		modeset_pipes &= ~BIT(pipe);

		intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	WARN_ON(modeset_pipes);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
   15257 
/*
 * Drain the deferred-free list: detach the whole llist in one operation
 * and drop the final reference on each queued atomic state.
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
   15267 
   15268 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
   15269 {
   15270 	struct drm_i915_private *dev_priv =
   15271 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
   15272 
   15273 	intel_atomic_helper_free_state(dev_priv);
   15274 }
   15275 
/*
 * NetBSD: waiter callback hooked onto the commit_ready sw fence's wait
 * queue by intel_atomic_commit_fence_wait().  When the fence fires it
 * wakes everyone sleeping on atomic_commit_wq (under
 * atomic_commit_lock) so the waiter can re-check its condition.
 */
static int
intel_atomic_commit_fence_wake(struct i915_sw_fence_waiter *waiter,
    unsigned mode, int flags, void *not_a_cookie)
{
	struct intel_atomic_state *intel_state = waiter->private;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	spin_lock(&dev_priv->atomic_commit_lock);
	DRM_SPIN_WAKEUP_ALL(&dev_priv->atomic_commit_wq,
	    &dev_priv->atomic_commit_lock);
	spin_unlock(&dev_priv->atomic_commit_lock);

	return 0;
}
   15290 
/*
 * NetBSD: block until the commit_ready fence signals or a modeset GPU
 * reset is flagged.  A waiter whose callback pokes atomic_commit_wq is
 * registered on the fence's wait list; we then sleep on that queue
 * under atomic_commit_lock until the condition holds, and finally
 * unhook the waiter (safe whether or not it ever fired).
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
	struct i915_sw_fence_waiter waiter;
	int ret;

	waiter.flags = 0;
	waiter.func = intel_atomic_commit_fence_wake;
	waiter.private = intel_state;

	/* Hook our waiter onto the fence's wait list. */
	spin_lock(&intel_state->commit_ready.wait.lock);
	list_add_tail(&waiter.entry, &intel_state->commit_ready.wait.head);
	spin_unlock(&intel_state->commit_ready.wait.lock);

	/* Sleep until done/reset; wait result (ret) deliberately unused. */
	spin_lock(&dev_priv->atomic_commit_lock);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &dev_priv->atomic_commit_wq,
	    &dev_priv->atomic_commit_lock,
	    (i915_sw_fence_done(&intel_state->commit_ready) ||
		test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)));
	spin_unlock(&dev_priv->atomic_commit_lock);

	/* Unhook the waiter again. */
	spin_lock(&intel_state->commit_ready.wait.lock);
	list_del(&waiter.entry);
	spin_unlock(&intel_state->commit_ready.wait.lock);
}
   15316 
   15317 static void intel_atomic_cleanup_work(struct work_struct *work)
   15318 {
   15319 	struct drm_atomic_state *state =
   15320 		container_of(work, struct drm_atomic_state, commit_work);
   15321 	struct drm_i915_private *i915 = to_i915(state->dev);
   15322 
   15323 	drm_atomic_helper_cleanup_planes(&i915->drm, state);
   15324 	drm_atomic_helper_commit_cleanup_done(state);
   15325 	drm_atomic_state_put(state);
   15326 
   15327 	intel_atomic_helper_free_state(i915);
   15328 }
   15329 
   15330 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
   15331 {
   15332 	struct drm_device *dev = state->base.dev;
   15333 	struct drm_i915_private *dev_priv = to_i915(dev);
   15334 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
   15335 	struct intel_crtc *crtc;
   15336 	u64 put_domains[I915_MAX_PIPES] = {};
   15337 	intel_wakeref_t wakeref = 0;
   15338 	int i;
   15339 
   15340 	intel_atomic_commit_fence_wait(state);
   15341 
   15342 	drm_atomic_helper_wait_for_dependencies(&state->base);
   15343 
   15344 	if (state->modeset)
   15345 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
   15346 
   15347 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   15348 					    new_crtc_state, i) {
   15349 		if (needs_modeset(new_crtc_state) ||
   15350 		    new_crtc_state->update_pipe) {
   15351 
   15352 			put_domains[crtc->pipe] =
   15353 				modeset_get_crtc_power_domains(new_crtc_state);
   15354 		}
   15355 	}
   15356 
   15357 	intel_commit_modeset_disables(state);
   15358 
   15359 	/* FIXME: Eventually get rid of our crtc->config pointer */
   15360 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
   15361 		crtc->config = new_crtc_state;
   15362 
   15363 	if (state->modeset) {
   15364 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
   15365 
   15366 		intel_set_cdclk_pre_plane_update(dev_priv,
   15367 						 &state->cdclk.actual,
   15368 						 &dev_priv->cdclk.actual,
   15369 						 state->cdclk.pipe);
   15370 
   15371 		/*
   15372 		 * SKL workaround: bspec recommends we disable the SAGV when we
   15373 		 * have more then one pipe enabled
   15374 		 */
   15375 		if (!intel_can_enable_sagv(state))
   15376 			intel_disable_sagv(dev_priv);
   15377 
   15378 		intel_modeset_verify_disabled(dev_priv, state);
   15379 	}
   15380 
   15381 	/* Complete the events for pipes that have now been disabled */
   15382 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   15383 		bool modeset = needs_modeset(new_crtc_state);
   15384 
   15385 		/* Complete events for now disable pipes here. */
   15386 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
   15387 			spin_lock_irq(&dev->event_lock);
   15388 			drm_crtc_send_vblank_event(&crtc->base,
   15389 						   new_crtc_state->uapi.event);
   15390 			spin_unlock_irq(&dev->event_lock);
   15391 
   15392 			new_crtc_state->uapi.event = NULL;
   15393 		}
   15394 	}
   15395 
   15396 	if (state->modeset)
   15397 		intel_encoders_update_prepare(state);
   15398 
   15399 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
   15400 	dev_priv->display.commit_modeset_enables(state);
   15401 
   15402 	if (state->modeset) {
   15403 		intel_encoders_update_complete(state);
   15404 
   15405 		intel_set_cdclk_post_plane_update(dev_priv,
   15406 						  &state->cdclk.actual,
   15407 						  &dev_priv->cdclk.actual,
   15408 						  state->cdclk.pipe);
   15409 	}
   15410 
   15411 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
   15412 	 * already, but still need the state for the delayed optimization. To
   15413 	 * fix this:
   15414 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
   15415 	 * - schedule that vblank worker _before_ calling hw_done
   15416 	 * - at the start of commit_tail, cancel it _synchrously
   15417 	 * - switch over to the vblank wait helper in the core after that since
   15418 	 *   we don't need out special handling any more.
   15419 	 */
   15420 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
   15421 
   15422 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   15423 		if (new_crtc_state->hw.active &&
   15424 		    !needs_modeset(new_crtc_state) &&
   15425 		    !new_crtc_state->preload_luts &&
   15426 		    (new_crtc_state->uapi.color_mgmt_changed ||
   15427 		     new_crtc_state->update_pipe))
   15428 			intel_color_load_luts(new_crtc_state);
   15429 	}
   15430 
   15431 	/*
   15432 	 * Now that the vblank has passed, we can go ahead and program the
   15433 	 * optimal watermarks on platforms that need two-step watermark
   15434 	 * programming.
   15435 	 *
   15436 	 * TODO: Move this (and other cleanup) to an async worker eventually.
   15437 	 */
   15438 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   15439 					    new_crtc_state, i) {
   15440 		/*
   15441 		 * Gen2 reports pipe underruns whenever all planes are disabled.
   15442 		 * So re-enable underrun reporting after some planes get enabled.
   15443 		 *
   15444 		 * We do this before .optimize_watermarks() so that we have a
   15445 		 * chance of catching underruns with the intermediate watermarks
   15446 		 * vs. the new plane configuration.
   15447 		 */
   15448 		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
   15449 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
   15450 
   15451 		if (dev_priv->display.optimize_watermarks)
   15452 			dev_priv->display.optimize_watermarks(state, crtc);
   15453 	}
   15454 
   15455 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
   15456 		intel_post_plane_update(state, crtc);
   15457 
   15458 		if (put_domains[i])
   15459 			modeset_put_power_domains(dev_priv, put_domains[i]);
   15460 
   15461 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
   15462 	}
   15463 
   15464 	/* Underruns don't always raise interrupts, so check manually */
   15465 	intel_check_cpu_fifo_underruns(dev_priv);
   15466 	intel_check_pch_fifo_underruns(dev_priv);
   15467 
   15468 	if (state->modeset)
   15469 		intel_verify_planes(state);
   15470 
   15471 	if (state->modeset && intel_can_enable_sagv(state))
   15472 		intel_enable_sagv(dev_priv);
   15473 
   15474 	drm_atomic_helper_commit_hw_done(&state->base);
   15475 
   15476 	if (state->modeset) {
   15477 		/* As one of the primary mmio accessors, KMS has a high
   15478 		 * likelihood of triggering bugs in unclaimed access. After we
   15479 		 * finish modesetting, see if an error has been flagged, and if
   15480 		 * so enable debugging for the next modeset - and hope we catch
   15481 		 * the culprit.
   15482 		 */
   15483 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
   15484 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
   15485 	}
   15486 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
   15487 
   15488 	/*
   15489 	 * Defer the cleanup of the old state to a separate worker to not
   15490 	 * impede the current task (userspace for blocking modesets) that
   15491 	 * are executed inline. For out-of-line asynchronous modesets/flips,
   15492 	 * deferring to a new worker seems overkill, but we would place a
   15493 	 * schedule point (cond_resched()) here anyway to keep latencies
   15494 	 * down.
   15495 	 */
   15496 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
   15497 	queue_work(system_highpri_wq, &state->base.commit_work);
   15498 }
   15499 
   15500 static void intel_atomic_commit_work(struct work_struct *work)
   15501 {
   15502 	struct intel_atomic_state *state =
   15503 		container_of(work, struct intel_atomic_state, base.commit_work);
   15504 
   15505 	intel_atomic_commit_tail(state);
   15506 }
   15507 
   15508 int __i915_sw_fence_call
   15509 intel_atomic_commit_ready(struct i915_sw_fence *fence,
   15510 			  enum i915_sw_fence_notify notify)
   15511 {
   15512 	struct intel_atomic_state *state =
   15513 		container_of(fence, struct intel_atomic_state, commit_ready);
   15514 
   15515 	switch (notify) {
   15516 	case FENCE_COMPLETE:
   15517 		/* we do blocking waits in the worker, nothing to do here */
   15518 		break;
   15519 	case FENCE_FREE:
   15520 		{
   15521 			struct intel_atomic_helper *helper =
   15522 				&to_i915(state->base.dev)->atomic_helper;
   15523 
   15524 			if (llist_add(&state->freed, &helper->free_list))
   15525 				schedule_work(&helper->free_work);
   15526 			break;
   15527 		}
   15528 	}
   15529 
   15530 	return NOTIFY_DONE;
   15531 }
   15532 
/*
 * Transfer frontbuffer tracking bits from the old to the new
 * framebuffer of every plane touched by this commit.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}
   15545 
/*
 * Assert that every crtc's modeset mutex is held.  Swapping global
 * (non-per-crtc) state in intel_atomic_commit() requires all of them.
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}
   15553 
/*
 * Display atomic commit entry point.
 *
 * Prepares the commit, swaps the new state in, and then either runs the
 * commit tail synchronously (blocking commits) or queues it on a
 * workqueue (nonblocking commits).  Returns 0 on success or a negative
 * error code; on error the state has not been swapped and all
 * references taken here are dropped again.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Keep the device awake across the commit; released in the tail. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_reinit(&state->commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		/* Commit the fence so its FENCE_FREE cleanup can run. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);

	if (ret) {
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	/* From this point on the commit can no longer fail. */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (state->global_state_changed) {
		/* Global state swap requires all crtc locks to be held. */
		assert_global_state_locked(dev_priv);

		memcpy(dev_priv->min_cdclk, state->min_cdclk,
		       sizeof(state->min_cdclk));
		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
		       sizeof(state->min_voltage_level));
		dev_priv->active_pipes = state->active_pipes;
		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

		intel_cdclk_swap_state(state);
	}

	/* Extra state reference held across the commit tail. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Serialize a blocking modeset against queued nonblocking ones. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
   15647 
   15648 #ifdef __NetBSD__
   15649 
   15650 /* XXX */
   15651 
   15652 #else
   15653 
/*
 * Bookkeeping for a one-shot vblank wait-queue entry that may boost GPU
 * clocks for a flip; carries the crtc vblank reference and the render
 * request taken in add_rps_boost_after_vblank() and consumed in
 * do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};
   15660 
/*
 * Vblank wait-queue callback: boost the GPU frequency if the request
 * producing the new frame has not started by the time the vblank fires.
 * Consumes the request and crtc-vblank references taken in
 * add_rps_boost_after_vblank(), removes itself from the queue and frees
 * the wait entry.  Returns 1 (entry handled).
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
   15682 
   15683 #endif
   15684 
/*
 * Queue a do_rps_boost() callback on the crtc's vblank waitqueue for
 * the given flip fence.  Silently does nothing when the fence is not an
 * i915 request, the platform is pre-gen6, the vblank reference cannot
 * be taken, or allocation fails — the boost is strictly best-effort.
 * Not implemented on NetBSD (XXX i915 rps boost).
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
#ifndef __NetBSD__		/* XXX i915 rps boost */
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* Both references are released by do_rps_boost(). */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
#endif
}
   15715 
/*
 * Pin (and fence, where applicable) the framebuffer object backing
 * @plane_state so the display hardware can scan it out.  Cursor planes
 * on platforms requiring a physical cursor address get a contiguous
 * physical attachment first.  On success the pinned mapping is stored
 * in plane_state->vma; returns 0 or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}
   15745 
   15746 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
   15747 {
   15748 	struct i915_vma *vma;
   15749 
   15750 	vma = fetch_and_zero(&old_plane_state->vma);
   15751 	if (vma)
   15752 		intel_unpin_fb_vma(vma, old_plane_state->flags);
   15753 }
   15754 
/*
 * Raise the scheduling priority of outstanding rendering into a
 * framebuffer object to display priority, so the frame is ready for
 * scanout as promptly as possible.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
   15763 
   15764 /**
   15765  * intel_prepare_plane_fb - Prepare fb for usage on plane
   15766  * @plane: drm plane to prepare for
   15767  * @_new_plane_state: the plane state being prepared
   15768  *
   15769  * Prepares a framebuffer for usage on a display plane.  Generally this
   15770  * involves pinning the underlying object and updating the frontbuffer tracking
   15771  * bits.  Some older platforms need special physical address handling for
   15772  * cursor planes.
   15773  *
   15774  * Returns 0 on success, negative error code on failure.
   15775  */
   15776 int
   15777 intel_prepare_plane_fb(struct drm_plane *plane,
   15778 		       struct drm_plane_state *_new_plane_state)
   15779 {
   15780 	struct intel_plane_state *new_plane_state =
   15781 		to_intel_plane_state(_new_plane_state);
   15782 	struct intel_atomic_state *intel_state =
   15783 		to_intel_atomic_state(new_plane_state->uapi.state);
   15784 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
   15785 	struct drm_framebuffer *fb = new_plane_state->hw.fb;
   15786 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
   15787 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
   15788 	int ret;
   15789 
   15790 	if (old_obj) {
   15791 		struct intel_crtc_state *crtc_state =
   15792 			intel_atomic_get_new_crtc_state(intel_state,
   15793 							to_intel_crtc(plane->state->crtc));
   15794 
   15795 		/* Big Hammer, we also need to ensure that any pending
   15796 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
   15797 		 * current scanout is retired before unpinning the old
   15798 		 * framebuffer. Note that we rely on userspace rendering
   15799 		 * into the buffer attached to the pipe they are waiting
   15800 		 * on. If not, userspace generates a GPU hang with IPEHR
   15801 		 * point to the MI_WAIT_FOR_EVENT.
   15802 		 *
   15803 		 * This should only fail upon a hung GPU, in which case we
   15804 		 * can safely continue.
   15805 		 */
   15806 		if (needs_modeset(crtc_state)) {
   15807 			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
   15808 							      old_obj->base.resv, NULL,
   15809 							      false, 0,
   15810 							      GFP_KERNEL);
   15811 			if (ret < 0)
   15812 				return ret;
   15813 		}
   15814 	}
   15815 
   15816 	if (new_plane_state->uapi.fence) { /* explicit fencing */
   15817 		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
   15818 						    new_plane_state->uapi.fence,
   15819 						    I915_FENCE_TIMEOUT,
   15820 						    GFP_KERNEL);
   15821 		if (ret < 0)
   15822 			return ret;
   15823 	}
   15824 
   15825 	if (!obj)
   15826 		return 0;
   15827 
   15828 	ret = i915_gem_object_pin_pages(obj);
   15829 	if (ret)
   15830 		return ret;
   15831 
   15832 	ret = intel_plane_pin_fb(new_plane_state);
   15833 
   15834 	i915_gem_object_unpin_pages(obj);
   15835 	if (ret)
   15836 		return ret;
   15837 
   15838 	fb_obj_bump_render_priority(obj);
   15839 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
   15840 
   15841 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
   15842 		struct dma_fence *fence;
   15843 
   15844 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
   15845 						      obj->base.resv, NULL,
   15846 						      false, I915_FENCE_TIMEOUT,
   15847 						      GFP_KERNEL);
   15848 		if (ret < 0)
   15849 			return ret;
   15850 
   15851 		fence = dma_resv_get_excl_rcu(obj->base.resv);
   15852 		if (fence) {
   15853 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
   15854 						   fence);
   15855 			dma_fence_put(fence);
   15856 		}
   15857 	} else {
   15858 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
   15859 					   new_plane_state->uapi.fence);
   15860 	}
   15861 
   15862 	/*
   15863 	 * We declare pageflips to be interactive and so merit a small bias
   15864 	 * towards upclocking to deliver the frame on time. By only changing
   15865 	 * the RPS thresholds to sample more regularly and aim for higher
   15866 	 * clocks we can hopefully deliver low power workloads (like kodi)
   15867 	 * that are not quite steady state without resorting to forcing
   15868 	 * maximum clocks following a vblank miss (see do_rps_boost()).
   15869 	 */
   15870 	if (!intel_state->rps_interactive) {
   15871 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
   15872 		intel_state->rps_interactive = true;
   15873 	}
   15874 
   15875 	return 0;
   15876 }
   15877 
   15878 /**
   15879  * intel_cleanup_plane_fb - Cleans up an fb after plane use
   15880  * @plane: drm plane to clean up for
   15881  * @_old_plane_state: the state from the previous modeset
   15882  *
   15883  * Cleans up a framebuffer that has just been removed from a plane.
   15884  */
   15885 void
   15886 intel_cleanup_plane_fb(struct drm_plane *plane,
   15887 		       struct drm_plane_state *_old_plane_state)
   15888 {
   15889 	struct intel_plane_state *old_plane_state =
   15890 		to_intel_plane_state(_old_plane_state);
   15891 	struct intel_atomic_state *intel_state =
   15892 		to_intel_atomic_state(old_plane_state->uapi.state);
   15893 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
   15894 
   15895 	if (intel_state->rps_interactive) {
   15896 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
   15897 		intel_state->rps_interactive = false;
   15898 	}
   15899 
   15900 	/* Should only be called after a successful intel_prepare_plane_fb()! */
   15901 	intel_plane_unpin_fb(old_plane_state);
   15902 }
   15903 
   15904 /**
   15905  * intel_plane_destroy - destroy a plane
   15906  * @plane: plane to destroy
   15907  *
   15908  * Common destruction function for all types of planes (primary, cursor,
   15909  * sprite).
   15910  */
   15911 void intel_plane_destroy(struct drm_plane *plane)
   15912 {
   15913 	drm_plane_cleanup(plane);
   15914 	kfree(to_intel_plane(plane));
   15915 }
   15916 
   15917 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
   15918 					    u32 format, u64 modifier)
   15919 {
   15920 	switch (modifier) {
   15921 	case DRM_FORMAT_MOD_LINEAR:
   15922 	case I915_FORMAT_MOD_X_TILED:
   15923 		break;
   15924 	default:
   15925 		return false;
   15926 	}
   15927 
   15928 	switch (format) {
   15929 	case DRM_FORMAT_C8:
   15930 	case DRM_FORMAT_RGB565:
   15931 	case DRM_FORMAT_XRGB1555:
   15932 	case DRM_FORMAT_XRGB8888:
   15933 		return modifier == DRM_FORMAT_MOD_LINEAR ||
   15934 			modifier == I915_FORMAT_MOD_X_TILED;
   15935 	default:
   15936 		return false;
   15937 	}
   15938 }
   15939 
   15940 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
   15941 					    u32 format, u64 modifier)
   15942 {
   15943 	switch (modifier) {
   15944 	case DRM_FORMAT_MOD_LINEAR:
   15945 	case I915_FORMAT_MOD_X_TILED:
   15946 		break;
   15947 	default:
   15948 		return false;
   15949 	}
   15950 
   15951 	switch (format) {
   15952 	case DRM_FORMAT_C8:
   15953 	case DRM_FORMAT_RGB565:
   15954 	case DRM_FORMAT_XRGB8888:
   15955 	case DRM_FORMAT_XBGR8888:
   15956 	case DRM_FORMAT_ARGB8888:
   15957 	case DRM_FORMAT_ABGR8888:
   15958 	case DRM_FORMAT_XRGB2101010:
   15959 	case DRM_FORMAT_XBGR2101010:
   15960 	case DRM_FORMAT_ARGB2101010:
   15961 	case DRM_FORMAT_ABGR2101010:
   15962 	case DRM_FORMAT_XBGR16161616F:
   15963 		return modifier == DRM_FORMAT_MOD_LINEAR ||
   15964 			modifier == I915_FORMAT_MOD_X_TILED;
   15965 	default:
   15966 		return false;
   15967 	}
   15968 }
   15969 
   15970 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
   15971 					      u32 format, u64 modifier)
   15972 {
   15973 	return modifier == DRM_FORMAT_MOD_LINEAR &&
   15974 		format == DRM_FORMAT_ARGB8888;
   15975 }
   15976 
/* drm_plane_funcs used for gen4+ primary planes (see intel_primary_plane_create()). */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
   15985 
/* drm_plane_funcs used for pre-gen4 primary planes (see intel_primary_plane_create()). */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
   15994 
/*
 * Fast path for the legacy cursor ioctls: update the cursor fb and/or
 * position without a full atomic commit (and thus without vblank
 * waits).  Any condition that could affect watermarks, or that could
 * race with an outstanding commit, bails out to the regular atomic
 * slowpath via drm_atomic_helper_update_plane() (the "slow" label).
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On failure free the new plane state; on success the now-unused old one. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
   16117 
/* drm_plane_funcs for cursor planes; .update_plane has a legacy fast path. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
   16126 
   16127 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
   16128 			       enum i9xx_plane_id i9xx_plane)
   16129 {
   16130 	if (!HAS_FBC(dev_priv))
   16131 		return false;
   16132 
   16133 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
   16134 		return i9xx_plane == PLANE_A; /* tied to pipe A */
   16135 	else if (IS_IVYBRIDGE(dev_priv))
   16136 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
   16137 			i9xx_plane == PLANE_C;
   16138 	else if (INTEL_GEN(dev_priv) >= 4)
   16139 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
   16140 	else
   16141 		return i9xx_plane == PLANE_A;
   16142 }
   16143 
/*
 * Allocate, configure and register the primary plane for @pipe on
 * pre-gen9 hardware (gen9+ is delegated to skl_universal_plane_create()).
 * Selects formats, modifiers, plane funcs, hooks and rotation support
 * per platform.  Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the format list for this platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	/* Per-platform minimum cdclk hook. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	possible_crtcs = BIT(pipe);

	/* The user-visible name differs per platform generation. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary planes sit at the bottom of the z-order. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
   16279 
/*
 * Create and register the cursor plane for @pipe.
 *
 * i845/i865 have their own cursor programming interface; all other
 * platforms use the i9xx-style per-pipe cursor hooks.  Returns the new
 * plane on success or an ERR_PTR() on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		/* i845/i865: dedicated cursor hardware hooks. */
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		/* Everything else uses the i9xx-style cursor. */
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * Invalidate the cached register values so the first plane update
	 * unconditionally writes the hardware registers.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	/* The cursor plane is tied to exactly one pipe. */
	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor stacks above the primary plane and all sprites. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
   16347 
/*
 * Callback table entries shared by every CRTC variant below; the
 * variants differ only in their vblank counter/enable/disable hooks.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

/* Gen8+ (non-GMCH): g4x-style HW frame counter, bdw vblank hooks. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

/* Ironlake..Haswell (non-GMCH, pre-gen8). */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

/* G4x/VLV/CHV: g4x frame counter with i965-style vblank IRQs. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* Other gen4 parts: i915-style frame counter. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* i915GM/i945GM: dedicated vblank enable/disable hooks. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

/* Remaining gen3 parts. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

/* Gen2. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
   16414 
   16415 static struct intel_crtc *intel_crtc_alloc(void)
   16416 {
   16417 	struct intel_crtc_state *crtc_state;
   16418 	struct intel_crtc *crtc;
   16419 
   16420 	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
   16421 	if (!crtc)
   16422 		return ERR_PTR(-ENOMEM);
   16423 
   16424 	crtc_state = intel_crtc_state_alloc(crtc);
   16425 	if (!crtc_state) {
   16426 		kfree(crtc);
   16427 		return ERR_PTR(-ENOMEM);
   16428 	}
   16429 
   16430 	crtc->base.state = &crtc_state->uapi;
   16431 	crtc->config = crtc_state;
   16432 
   16433 	return crtc;
   16434 }
   16435 
/*
 * Undo intel_crtc_alloc(): release the initial CRTC state and the CRTC
 * itself.  Used on the intel_crtc_init() error path.
 */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
   16441 
/*
 * Create and register the CRTC for @pipe, along with its primary plane,
 * all sprite planes, and the cursor plane.  Also records the pipe->crtc
 * (and, pre-gen9, plane->crtc) lookup mappings.  Returns 0 or a negative
 * error code; on failure the partially constructed CRTC is freed.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	/* Primary plane first; it is also passed to drm_crtc_init below. */
	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick the vblank callback variant matching the platform. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe must map to exactly one CRTC. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		/* Pre-gen9 also keeps a primary-plane -> CRTC mapping. */
		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	/* Elsewhere the code assumes drm CRTC index == pipe number. */
	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
   16529 
   16530 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
   16531 				      struct drm_file *file)
   16532 {
   16533 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
   16534 	struct drm_crtc *drmmode_crtc;
   16535 	struct intel_crtc *crtc;
   16536 
   16537 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
   16538 	if (!drmmode_crtc)
   16539 		return -ENOENT;
   16540 
   16541 	crtc = to_intel_crtc(drmmode_crtc);
   16542 	pipe_from_crtc_id->pipe = crtc->pipe;
   16543 
   16544 	return 0;
   16545 }
   16546 
   16547 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
   16548 {
   16549 	struct drm_device *dev = encoder->base.dev;
   16550 	struct intel_encoder *source_encoder;
   16551 	u32 possible_clones = 0;
   16552 
   16553 	for_each_intel_encoder(dev, source_encoder) {
   16554 		if (encoders_cloneable(encoder, source_encoder))
   16555 			possible_clones |= drm_encoder_mask(&source_encoder->base);
   16556 	}
   16557 
   16558 	return possible_clones;
   16559 }
   16560 
   16561 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
   16562 {
   16563 	struct drm_device *dev = encoder->base.dev;
   16564 	struct intel_crtc *crtc;
   16565 	u32 possible_crtcs = 0;
   16566 
   16567 	for_each_intel_crtc(dev, crtc) {
   16568 		if (encoder->pipe_mask & BIT(crtc->pipe))
   16569 			possible_crtcs |= drm_crtc_mask(&crtc->base);
   16570 	}
   16571 
   16572 	return possible_crtcs;
   16573 }
   16574 
   16575 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
   16576 {
   16577 	if (!IS_MOBILE(dev_priv))
   16578 		return false;
   16579 
   16580 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
   16581 		return false;
   16582 
   16583 	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
   16584 		return false;
   16585 
   16586 	return true;
   16587 }
   16588 
   16589 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
   16590 {
   16591 	if (INTEL_GEN(dev_priv) >= 9)
   16592 		return false;
   16593 
   16594 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
   16595 		return false;
   16596 
   16597 	if (HAS_PCH_LPT_H(dev_priv) &&
   16598 	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
   16599 		return false;
   16600 
   16601 	/* DDI E can't be used if DDI A requires 4 lanes */
   16602 	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
   16603 		return false;
   16604 
   16605 	if (!dev_priv->vbt.int_crt_support)
   16606 		return false;
   16607 
   16608 	return true;
   16609 }
   16610 
   16611 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
   16612 {
   16613 	int pps_num;
   16614 	int pps_idx;
   16615 
   16616 	if (HAS_DDI(dev_priv))
   16617 		return;
   16618 	/*
   16619 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
   16620 	 * everywhere where registers can be write protected.
   16621 	 */
   16622 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   16623 		pps_num = 2;
   16624 	else
   16625 		pps_num = 1;
   16626 
   16627 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
   16628 		u32 val = I915_READ(PP_CONTROL(pps_idx));
   16629 
   16630 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
   16631 		I915_WRITE(PP_CONTROL(pps_idx), val);
   16632 	}
   16633 }
   16634 
   16635 static void intel_pps_init(struct drm_i915_private *dev_priv)
   16636 {
   16637 	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
   16638 		dev_priv->pps_mmio_base = PCH_PPS_BASE;
   16639 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   16640 		dev_priv->pps_mmio_base = VLV_PPS_BASE;
   16641 	else
   16642 		dev_priv->pps_mmio_base = PPS_BASE;
   16643 
   16644 	intel_pps_unlock_regs_wa(dev_priv);
   16645 }
   16646 
/*
 * Probe and register every display output (encoder/connector) for this
 * platform.  One branch per platform family; within each branch the
 * probe order matters (e.g. LVDS before CRT on PCH-split, eDP before
 * HDMI on VLV/CHV).  Finishes by computing possible_crtcs /
 * possible_clones for every registered encoder.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* Gen12: all outputs are DDIs (note: no port C). */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D is either eDP (handled above) or HDMI/DP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, compute their CRTC/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
   16885 
/*
 * drm_framebuffer_funcs::destroy - unregister the framebuffer, drop the
 * frontbuffer tracking reference taken in intel_framebuffer_init(), and
 * free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}
   16895 
   16896 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
   16897 						struct drm_file *file,
   16898 						unsigned int *handle)
   16899 {
   16900 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
   16901 
   16902 	if (obj->userptr.mm) {
   16903 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
   16904 		return -EINVAL;
   16905 	}
   16906 
   16907 	return drm_gem_handle_create(file, &obj->base, handle);
   16908 }
   16909 
/*
 * drm_framebuffer_funcs::dirty - userspace flushed CPU rendering into the
 * framebuffer.  Flush the object if it is being displayed and notify the
 * frontbuffer tracking code.  The clip rectangles are ignored; the whole
 * framebuffer is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
   16923 
/* Framebuffer callbacks installed by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
   16929 
/*
 * Validate @mode_cmd against the hardware/object constraints and
 * initialize @intel_fb around @obj.  Takes a frontbuffer tracking
 * reference which is dropped again on any error path.  Returns 0 on
 * success or a negative error code (-EINVAL for validation failures,
 * -ENOMEM if frontbuffer tracking can't be set up).
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's tiling under its lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the set tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* At least one plane must support this format+modifier combo. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%"PRIx64"\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
			      mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-color-plane validation: one shared handle, aligned pitches. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* Gen12 CCS aux planes have a fixed required pitch. */
		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
					      i,
					      fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Drop the frontbuffer reference taken at the top. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
   17065 
   17066 static struct drm_framebuffer *
   17067 intel_user_framebuffer_create(struct drm_device *dev,
   17068 			      struct drm_file *filp,
   17069 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
   17070 {
   17071 	struct drm_framebuffer *fb;
   17072 	struct drm_i915_gem_object *obj;
   17073 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
   17074 
   17075 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
   17076 	if (!obj)
   17077 		return ERR_PTR(-ENOENT);
   17078 
   17079 	fb = intel_framebuffer_create(obj, &mode_cmd);
   17080 	i915_gem_object_put(obj);
   17081 
   17082 	return fb;
   17083 }
   17084 
/*
 * Free a subclassed atomic state: release the base drm state, tear down
 * the commit-ready sw_fence embedded in our subclass, then free the
 * allocation.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
   17095 
/*
 * drm_mode_config_funcs-style mode_valid hook: reject display modes
 * whose flags or timings the transcoders can never handle, using
 * per-generation hdisplay/vdisplay/htotal/vtotal limits and minimum
 * blanking requirements.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* Composite-sync variants are not supported. */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size / blanking intervals. */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
   17186 
   17187 enum drm_mode_status
   17188 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
   17189 				const struct drm_display_mode *mode)
   17190 {
   17191 	int plane_width_max, plane_height_max;
   17192 
   17193 	/*
   17194 	 * intel_mode_valid() should be
   17195 	 * sufficient on older platforms.
   17196 	 */
   17197 	if (INTEL_GEN(dev_priv) < 9)
   17198 		return MODE_OK;
   17199 
   17200 	/*
   17201 	 * Most people will probably want a fullscreen
   17202 	 * plane so let's not advertize modes that are
   17203 	 * too big for that.
   17204 	 */
   17205 	if (INTEL_GEN(dev_priv) >= 11) {
   17206 		plane_width_max = 5120;
   17207 		plane_height_max = 4320;
   17208 	} else {
   17209 		plane_width_max = 5120;
   17210 		plane_height_max = 4096;
   17211 	}
   17212 
   17213 	if (mode->hdisplay > plane_width_max)
   17214 		return MODE_H_ILLEGAL;
   17215 
   17216 	if (mode->vdisplay > plane_height_max)
   17217 		return MODE_V_ILLEGAL;
   17218 
   17219 	return MODE_OK;
   17220 }
   17221 
/*
 * Top-level drm_mode_config hooks: user framebuffer creation, format
 * info lookup, fbdev hotplug notification, the mode filter shared by
 * all connectors, and the atomic check/commit/state entry points.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
   17233 
   17234 /**
   17235  * intel_init_display_hooks - initialize the display modesetting hooks
   17236  * @dev_priv: device private
   17237  */
   17238 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
   17239 {
   17240 	intel_init_cdclk_hooks(dev_priv);
   17241 
   17242 	if (INTEL_GEN(dev_priv) >= 9) {
   17243 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
   17244 		dev_priv->display.get_initial_plane_config =
   17245 			skl_get_initial_plane_config;
   17246 		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
   17247 		dev_priv->display.crtc_enable = hsw_crtc_enable;
   17248 		dev_priv->display.crtc_disable = hsw_crtc_disable;
   17249 	} else if (HAS_DDI(dev_priv)) {
   17250 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
   17251 		dev_priv->display.get_initial_plane_config =
   17252 			i9xx_get_initial_plane_config;
   17253 		dev_priv->display.crtc_compute_clock =
   17254 			hsw_crtc_compute_clock;
   17255 		dev_priv->display.crtc_enable = hsw_crtc_enable;
   17256 		dev_priv->display.crtc_disable = hsw_crtc_disable;
   17257 	} else if (HAS_PCH_SPLIT(dev_priv)) {
   17258 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
   17259 		dev_priv->display.get_initial_plane_config =
   17260 			i9xx_get_initial_plane_config;
   17261 		dev_priv->display.crtc_compute_clock =
   17262 			ilk_crtc_compute_clock;
   17263 		dev_priv->display.crtc_enable = ilk_crtc_enable;
   17264 		dev_priv->display.crtc_disable = ilk_crtc_disable;
   17265 	} else if (IS_CHERRYVIEW(dev_priv)) {
   17266 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17267 		dev_priv->display.get_initial_plane_config =
   17268 			i9xx_get_initial_plane_config;
   17269 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
   17270 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
   17271 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17272 	} else if (IS_VALLEYVIEW(dev_priv)) {
   17273 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17274 		dev_priv->display.get_initial_plane_config =
   17275 			i9xx_get_initial_plane_config;
   17276 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
   17277 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
   17278 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17279 	} else if (IS_G4X(dev_priv)) {
   17280 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17281 		dev_priv->display.get_initial_plane_config =
   17282 			i9xx_get_initial_plane_config;
   17283 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
   17284 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17285 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17286 	} else if (IS_PINEVIEW(dev_priv)) {
   17287 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17288 		dev_priv->display.get_initial_plane_config =
   17289 			i9xx_get_initial_plane_config;
   17290 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
   17291 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17292 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17293 	} else if (!IS_GEN(dev_priv, 2)) {
   17294 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17295 		dev_priv->display.get_initial_plane_config =
   17296 			i9xx_get_initial_plane_config;
   17297 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
   17298 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17299 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17300 	} else {
   17301 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17302 		dev_priv->display.get_initial_plane_config =
   17303 			i9xx_get_initial_plane_config;
   17304 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
   17305 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17306 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17307 	}
   17308 
   17309 	if (IS_GEN(dev_priv, 5)) {
   17310 		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
   17311 	} else if (IS_GEN(dev_priv, 6)) {
   17312 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
   17313 	} else if (IS_IVYBRIDGE(dev_priv)) {
   17314 		/* FIXME: detect B0+ stepping and use auto training */
   17315 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
   17316 	}
   17317 
   17318 	if (INTEL_GEN(dev_priv) >= 9)
   17319 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
   17320 	else
   17321 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
   17322 
   17323 }
   17324 
/*
 * Read the current cdclk state from the hardware and seed the
 * software (logical/actual) cdclk state from that readout.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	/* Start out with software state matching what the hardware reports. */
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
   17331 
   17332 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
   17333 {
   17334 	struct drm_plane *plane;
   17335 	struct drm_crtc *crtc;
   17336 
   17337 	drm_for_each_crtc(crtc, state->dev) {
   17338 		struct drm_crtc_state *crtc_state;
   17339 
   17340 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
   17341 		if (IS_ERR(crtc_state))
   17342 			return PTR_ERR(crtc_state);
   17343 	}
   17344 
   17345 	drm_for_each_plane(plane, state->dev) {
   17346 		struct drm_plane_state *plane_state;
   17347 
   17348 		plane_state = drm_atomic_get_plane_state(state, plane);
   17349 		if (IS_ERR(plane_state))
   17350 			return PTR_ERR(plane_state);
   17351 	}
   17352 
   17353 	return 0;
   17354 }
   17355 
   17356 /*
   17357  * Calculate what we think the watermarks should be for the state we've read
   17358  * out of the hardware and then immediately program those watermarks so that
   17359  * we ensure the hardware settings match our internal state.
   17360  *
   17361  * We can calculate what we think WM's should be by creating a duplicate of the
   17362  * current state (which was constructed during hardware readout) and running it
   17363  * through the atomic check code to calculate new watermark values in the
   17364  * state object.
   17365  */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (WARN_ON(!state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

	/* Re-entered after drm_modeset_backoff() on a lock contention (-EDEADLK). */
retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	/* Pull all CRTCs and planes into the state for a full recomputation. */
	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	/* Run the atomic check to compute the watermark values. */
	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		/* Mirror the computed watermarks into the committed crtc state. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Standard modeset deadlock-backoff dance: clear state and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	WARN(ret, "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
   17440 
   17441 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
   17442 {
   17443 	if (IS_GEN(dev_priv, 5)) {
   17444 		u32 fdi_pll_clk =
   17445 			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
   17446 
   17447 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
   17448 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
   17449 		dev_priv->fdi_pll_freq = 270000;
   17450 	} else {
   17451 		return;
   17452 	}
   17453 
   17454 	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
   17455 }
   17456 
/*
 * Commit the state read out from hardware at probe time, forcing all
 * active planes to recompute their software state.  Returns 0 on
 * success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	/* Re-entered after drm_modeset_backoff() on lock contention (-EDEADLK). */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard modeset deadlock-backoff dance: clear state and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
   17531 
   17532 static void intel_mode_config_init(struct drm_i915_private *i915)
   17533 {
   17534 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
   17535 
   17536 	drm_mode_config_init(&i915->drm);
   17537 
   17538 	mode_config->min_width = 0;
   17539 	mode_config->min_height = 0;
   17540 
   17541 	mode_config->preferred_depth = 24;
   17542 	mode_config->prefer_shadow = 1;
   17543 
   17544 	mode_config->allow_fb_modifiers = true;
   17545 
   17546 	mode_config->funcs = &intel_mode_funcs;
   17547 
   17548 	/*
   17549 	 * Maximum framebuffer dimensions, chosen to match
   17550 	 * the maximum render engine surface size on gen4+.
   17551 	 */
   17552 	if (INTEL_GEN(i915) >= 7) {
   17553 		mode_config->max_width = 16384;
   17554 		mode_config->max_height = 16384;
   17555 	} else if (INTEL_GEN(i915) >= 4) {
   17556 		mode_config->max_width = 8192;
   17557 		mode_config->max_height = 8192;
   17558 	} else if (IS_GEN(i915, 3)) {
   17559 		mode_config->max_width = 4096;
   17560 		mode_config->max_height = 4096;
   17561 	} else {
   17562 		mode_config->max_width = 2048;
   17563 		mode_config->max_height = 2048;
   17564 	}
   17565 
   17566 	if (IS_I845G(i915) || IS_I865G(i915)) {
   17567 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
   17568 		mode_config->cursor_height = 1023;
   17569 	} else if (IS_GEN(i915, 2)) {
   17570 		mode_config->cursor_width = 64;
   17571 		mode_config->cursor_height = 64;
   17572 	} else {
   17573 		mode_config->cursor_width = 256;
   17574 		mode_config->cursor_height = 256;
   17575 	}
   17576 }
   17577 
/*
 * One-time modeset initialization at driver load: set up workqueues,
 * mode config, power management, outputs, CRTCs, and read out and
 * sanitize the state left behind by the BIOS.  Returns 0 on success
 * or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	mutex_init(&i915->drrs.mutex);

	/* Workqueues for nonblocking modesets and page flips. */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	/* NetBSD: wait channel used to serialize atomic commits. */
	spin_lock_init(&i915->atomic_commit_lock);
	DRM_INIT_WAITQUEUE(&i915->atomic_commit_wq, "i915cmit");

	intel_mode_config_init(i915);

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	/* Deferred freeing of atomic states from interrupt context. */
	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create one CRTC per pipe when the display is present and enabled. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
#ifndef __NetBSD__		/* XXX We wait until intelfb is ready.  */
	intel_vga_disable(i915);
#endif
	intel_setup_outputs(i915);

	/* Read out and sanitize the BIOS-programmed hardware state. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
   17691 
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 timing (quirk:
 * i830 requires both pipes and both planes enabled).  Programs the
 * DPLL, pipe timings and PIPECONF directly; the write ordering below
 * follows hardware requirements and must not be rearranged.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the fixed dividers produce the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Fixed 640x480 timings (htotal 800, vtotal 525). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
   17761 
/*
 * Disable a pipe that was force-enabled by the i830 quirk.  All
 * planes and cursors must already be off (asserted below); the pipe
 * is stopped before the DPLL is turned off.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Nothing should still be scanning out of this pipe. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait until the scanline generator has actually stopped. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Only then shut down the DPLL, keeping VGA mode disabled. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
   17783 
/*
 * On pre-gen4 hardware a primary plane can be assigned to either
 * pipe.  If the BIOS attached a primary plane to the wrong pipe,
 * disable it so our fixed plane->pipe mapping holds.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Gen4+ primary planes have a fixed pipe assignment; nothing to do. */
	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		/* Skip planes that are not enabled in hardware. */
		if (!plane->get_hw_state(plane, &pipe))
			continue;

		/* Plane is attached to the pipe we expect; nothing to fix. */
		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
   17811 
/*
 * Return true if any encoder is currently attached to @crtc.  The
 * loop body returns on the first iteration, so this is just an
 * emptiness test on the encoder list.
 */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
   17822 
/*
 * Return the first connector attached to @encoder, or NULL if none.
 * The loop body returns on the first iteration, so only the head of
 * the connector list is ever examined.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
   17833 
   17834 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
   17835 			      enum pipe pch_transcoder)
   17836 {
   17837 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
   17838 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
   17839 }
   17840 
/*
 * Reset the frame start delay to 0 on both the CPU transcoder and,
 * when present, the PCH transcoder.  The BIOS may leave a non-zero
 * delay behind for debugging; we always run with 0.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+ keep the delay in the per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = I915_READ(reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* Older platforms keep the delay in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: delay lives in the PCH transcoder config register. */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* CPT+: delay lives in TRANS_CHICKEN2. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}
}
   17891 
/*
 * Sanitize one CRTC after hardware state readout: clear BIOS frame
 * start delays, turn off everything but the primary plane, disable
 * pipes without encoders, and initialize FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
   17958 
   17959 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
   17960 {
   17961 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   17962 
   17963 	/*
   17964 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
   17965 	 * the hardware when a high res displays plugged in. DPLL P
   17966 	 * divider is zero, and the pipe timings are bonkers. We'll
   17967 	 * try to disable everything in that case.
   17968 	 *
   17969 	 * FIXME would be nice to be able to sanitize this state
   17970 	 * without several WARNs, but for now let's take the easy
   17971 	 * road.
   17972 	 */
   17973 	return IS_GEN(dev_priv, 6) &&
   17974 		crtc_state->hw.active &&
   17975 		crtc_state->shared_dpll &&
   17976 		crtc_state->port_clock == 0;
   17977 }
   17978 
/*
 * Sanitize one encoder after hardware state readout.
 *
 * If the encoder has active connectors but no active pipe (e.g. fallout
 * from resume register restore, or a bogus BIOS DPLL config — see
 * has_bogus_dpll_config()), run the encoder's disable hooks manually and
 * clamp the connector/encoder linkage to off.  Afterwards, notify
 * opregion of the sanitized state and, on gen11+, sanitize the
 * encoder's PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a CRTC with a BIOS-misprogrammed DPLL as inactive so the
	 * encoder is disabled below. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder we saved above */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	/* gen11+ additionally needs the encoder's DPLL mapping sanitized */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
   18045 
   18046 /* FIXME read out full plane state for all planes */
   18047 static void readout_plane_state(struct drm_i915_private *dev_priv)
   18048 {
   18049 	struct intel_plane *plane;
   18050 	struct intel_crtc *crtc;
   18051 
   18052 	for_each_intel_plane(&dev_priv->drm, plane) {
   18053 		struct intel_plane_state *plane_state =
   18054 			to_intel_plane_state(plane->base.state);
   18055 		struct intel_crtc_state *crtc_state;
   18056 		enum pipe pipe = PIPE_A;
   18057 		bool visible;
   18058 
   18059 		visible = plane->get_hw_state(plane, &pipe);
   18060 
   18061 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   18062 		crtc_state = to_intel_crtc_state(crtc->base.state);
   18063 
   18064 		intel_set_plane_visible(crtc_state, plane_state, visible);
   18065 
   18066 		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
   18067 			      plane->base.base.id, plane->base.name,
   18068 			      enableddisabled(visible), pipe_name(pipe));
   18069 	}
   18070 
   18071 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   18072 		struct intel_crtc_state *crtc_state =
   18073 			to_intel_crtc_state(crtc->base.state);
   18074 
   18075 		fixup_active_planes(crtc_state);
   18076 	}
   18077 }
   18078 
/*
 * Read the current hardware display state into the software state
 * structures: per-CRTC pipe configs, plane visibility, shared DPLL
 * state, encoder->pipe links and connector->encoder links.  Then
 * derive initial mode, pixel rate, per-plane data rate, min cdclk and
 * voltage level bookkeeping from the active CRTC states.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	/* Reset each crtc's state and read the pipe config back from hw. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	/* Read back shared DPLL state and rebuild each pll's crtc mask. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/* EHL DPLL4: hold a DC-off power reference while it is on. */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Link each encoder to the crtc the hardware says it drives. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Link connectors to encoders and fill in the crtc masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive mode, pixel rate, data rate, cdclk and bw state per crtc. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
   18278 
   18279 static void
   18280 get_encoder_power_domains(struct drm_i915_private *dev_priv)
   18281 {
   18282 	struct intel_encoder *encoder;
   18283 
   18284 	for_each_intel_encoder(&dev_priv->drm, encoder) {
   18285 		struct intel_crtc_state *crtc_state;
   18286 
   18287 		if (!encoder->get_power_domains)
   18288 			continue;
   18289 
   18290 		/*
   18291 		 * MST-primary and inactive encoders don't have a crtc state
   18292 		 * and neither of these require any power domain references.
   18293 		 */
   18294 		if (!encoder->base.crtc)
   18295 			continue;
   18296 
   18297 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
   18298 		encoder->get_power_domains(encoder, crtc_state);
   18299 	}
   18300 }
   18301 
/*
 * Apply display workarounds that must be in effect early, before the
 * rest of the modeset hardware state is read out and sanitized.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
   18321 
   18322 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
   18323 				       enum port port, i915_reg_t hdmi_reg)
   18324 {
   18325 	u32 val = I915_READ(hdmi_reg);
   18326 
   18327 	if (val & SDVO_ENABLE ||
   18328 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
   18329 		return;
   18330 
   18331 	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
   18332 		      port_name(port));
   18333 
   18334 	val &= ~SDVO_PIPE_SEL_MASK;
   18335 	val |= SDVO_PIPE_SEL(PIPE_A);
   18336 
   18337 	I915_WRITE(hdmi_reg, val);
   18338 }
   18339 
   18340 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
   18341 				     enum port port, i915_reg_t dp_reg)
   18342 {
   18343 	u32 val = I915_READ(dp_reg);
   18344 
   18345 	if (val & DP_PORT_EN ||
   18346 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
   18347 		return;
   18348 
   18349 	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
   18350 		      port_name(port));
   18351 
   18352 	val &= ~DP_PIPE_SEL_MASK;
   18353 	val |= DP_PIPE_SEL(PIPE_A);
   18354 
   18355 	I915_WRITE(dp_reg, val);
   18356 }
   18357 
/* Sanitize the transcoder select bits of all IBX PCH DP and HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
   18380 
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state: TypeC port modes, encoder power domains, IBX PCH transcoder
 * selects, vblank state, plane mappings, encoders, crtcs, connector
 * atomic state, unused DPLLs, watermarks and crtc power domains.
 * Holds a POWER_DOMAIN_INIT wakeref for the duration.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but no longer used by a crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back (and on pre-skl, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* No crtc should still be holding power domains at this point. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
   18482 
   18483 void intel_display_resume(struct drm_device *dev)
   18484 {
   18485 	struct drm_i915_private *dev_priv = to_i915(dev);
   18486 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
   18487 	struct drm_modeset_acquire_ctx ctx;
   18488 	int ret;
   18489 
   18490 	dev_priv->modeset_restore_state = NULL;
   18491 	if (state)
   18492 		state->acquire_ctx = &ctx;
   18493 
   18494 	drm_modeset_acquire_init(&ctx, 0);
   18495 
   18496 	while (1) {
   18497 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
   18498 		if (ret != -EDEADLK)
   18499 			break;
   18500 
   18501 		drm_modeset_backoff(&ctx);
   18502 	}
   18503 
   18504 	if (!ret)
   18505 		ret = __intel_display_resume(dev, state, &ctx);
   18506 
   18507 	intel_enable_ipc(dev_priv);
   18508 	drm_modeset_drop_locks(&ctx);
   18509 	drm_modeset_acquire_fini(&ctx);
   18510 
   18511 	if (ret)
   18512 		DRM_ERROR("Restoring old state failed with %i\n", ret);
   18513 	if (state)
   18514 		drm_atomic_state_put(state);
   18515 }
   18516 
   18517 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
   18518 {
   18519 	struct intel_connector *connector;
   18520 	struct drm_connector_list_iter conn_iter;
   18521 
   18522 	/* Kill all the work that may have been queued by hpd. */
   18523 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
   18524 	for_each_intel_connector_iter(connector, &conn_iter) {
   18525 		if (connector->modeset_retry_work.func)
   18526 			cancel_work_sync(&connector->modeset_retry_work);
   18527 		if (connector->hdcp.shim) {
   18528 			cancel_delayed_work_sync(&connector->hdcp.check_work);
   18529 			cancel_work_sync(&connector->hdcp.prop_work);
   18530 		}
   18531 	}
   18532 	drm_connector_list_iter_end(&conn_iter);
   18533 }
   18534 
/*
 * Tear down all modeset state on driver removal.  The sequence matters:
 * interrupts and polling first, then MST/fbdev/FBC, then the DRM mode
 * config, and finally workqueues, locks and leftover allocations — see
 * the inline comments for the individual ordering constraints.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain any in-flight flips and modesets before tearing down. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_shared_dpll_cleanup(&i915->drm);

	intel_gmbus_teardown(i915);

	intel_fbc_cleanup(i915);

	intel_bw_cleanup(i915);

	/* NetBSD-specific: destroy the locks/waitqueues created at init. */
	DRM_DESTROY_WAITQUEUE(&i915->atomic_commit_wq);
	spin_lock_destroy(&i915->atomic_commit_lock);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	mutex_destroy(&i915->drrs.mutex);

	intel_fbc_cleanup_cfb(i915);
}
   18597 
   18598 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
   18599 
/*
 * Snapshot of display hardware registers taken at error-capture time by
 * intel_display_capture_error_state() and formatted later by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2 (hsw/bdw only) */

	/* Cursor plane registers, indexed by pipe. */
	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): never filled in by the capture code */
	} cursor[I915_MAX_PIPES];

	/* Pipe registers, indexed by pipe. */
	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was enabled at capture */
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	/* Primary plane registers, indexed by pipe.  Which fields are
	 * valid depends on gen; see intel_display_capture_error_state(). */
	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE (gen <= 3) */
		u32 pos;	/* DSPPOS (gen <= 3) */
		u32 addr;	/* DSPADDR (gen <= 7, not hsw) */
		u32 surface;	/* DSPSURF (gen >= 4) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	/* CPU transcoder timings; indexed like the transcoders[] table in
	 * intel_display_capture_error_state(). */
	struct intel_transcoder_error_state {
		bool available;	/* transcoder exists on this platform */
		bool power_domain_on;	/* transcoder power domain was enabled */
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
   18642 
   18643 struct intel_display_error_state *
   18644 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
   18645 {
   18646 	struct intel_display_error_state *error;
   18647 	int transcoders[] = {
   18648 		TRANSCODER_A,
   18649 		TRANSCODER_B,
   18650 		TRANSCODER_C,
   18651 		TRANSCODER_D,
   18652 		TRANSCODER_EDP,
   18653 	};
   18654 	int i;
   18655 
   18656 	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
   18657 
   18658 	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
   18659 		return NULL;
   18660 
   18661 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
   18662 	if (error == NULL)
   18663 		return NULL;
   18664 
   18665 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
   18666 		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
   18667 
   18668 	for_each_pipe(dev_priv, i) {
   18669 		error->pipe[i].power_domain_on =
   18670 			__intel_display_power_is_enabled(dev_priv,
   18671 							 POWER_DOMAIN_PIPE(i));
   18672 		if (!error->pipe[i].power_domain_on)
   18673 			continue;
   18674 
   18675 		error->cursor[i].control = I915_READ(CURCNTR(i));
   18676 		error->cursor[i].position = I915_READ(CURPOS(i));
   18677 		error->cursor[i].base = I915_READ(CURBASE(i));
   18678 
   18679 		error->plane[i].control = I915_READ(DSPCNTR(i));
   18680 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
   18681 		if (INTEL_GEN(dev_priv) <= 3) {
   18682 			error->plane[i].size = I915_READ(DSPSIZE(i));
   18683 			error->plane[i].pos = I915_READ(DSPPOS(i));
   18684 		}
   18685 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
   18686 			error->plane[i].addr = I915_READ(DSPADDR(i));
   18687 		if (INTEL_GEN(dev_priv) >= 4) {
   18688 			error->plane[i].surface = I915_READ(DSPSURF(i));
   18689 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
   18690 		}
   18691 
   18692 		error->pipe[i].source = I915_READ(PIPESRC(i));
   18693 
   18694 		if (HAS_GMCH(dev_priv))
   18695 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
   18696 	}
   18697 
   18698 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
   18699 		enum transcoder cpu_transcoder = transcoders[i];
   18700 
   18701 		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
   18702 			continue;
   18703 
   18704 		error->transcoder[i].available = true;
   18705 		error->transcoder[i].power_domain_on =
   18706 			__intel_display_power_is_enabled(dev_priv,
   18707 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
   18708 		if (!error->transcoder[i].power_domain_on)
   18709 			continue;
   18710 
   18711 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
   18712 
   18713 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
   18714 		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
   18715 		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
   18716 		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
   18717 		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
   18718 		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
   18719 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
   18720 	}
   18721 
   18722 	return error;
   18723 }
   18724 
/* Shorthand for printing into the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Format a previously captured display error snapshot into the error
 * state buffer.  The gen checks mirror those used at capture time so
 * only fields that were actually read are printed.  A NULL @error
 * (nothing captured) is a no-op.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Transcoder not present on this platform: nothing captured. */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
   18785 
   18786 #endif
   18787