Home | History | Annotate | Line # | Download | only in display
intel_display.c revision 1.1
      1 /*	$NetBSD: intel_display.c,v 1.1 2021/12/18 20:15:28 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright  2006-2007 Intel Corporation
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     23  * DEALINGS IN THE SOFTWARE.
     24  *
     25  * Authors:
     26  *	Eric Anholt <eric (at) anholt.net>
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 __KERNEL_RCSID(0, "$NetBSD: intel_display.c,v 1.1 2021/12/18 20:15:28 riastradh Exp $");
     31 
     32 #include <linux/i2c.h>
     33 #include <linux/input.h>
     34 #include <linux/intel-iommu.h>
     35 #include <linux/kernel.h>
     36 #include <linux/module.h>
     37 #include <linux/dma-resv.h>
     38 #include <linux/slab.h>
     39 
     40 #include <drm/drm_atomic.h>
     41 #include <drm/drm_atomic_helper.h>
     42 #include <drm/drm_atomic_uapi.h>
     43 #include <drm/drm_dp_helper.h>
     44 #include <drm/drm_edid.h>
     45 #include <drm/drm_fourcc.h>
     46 #include <drm/drm_plane_helper.h>
     47 #include <drm/drm_probe_helper.h>
     48 #include <drm/drm_rect.h>
     49 #include <drm/i915_drm.h>
     50 
     51 #include "display/intel_crt.h"
     52 #include "display/intel_ddi.h"
     53 #include "display/intel_dp.h"
     54 #include "display/intel_dp_mst.h"
     55 #include "display/intel_dsi.h"
     56 #include "display/intel_dvo.h"
     57 #include "display/intel_gmbus.h"
     58 #include "display/intel_hdmi.h"
     59 #include "display/intel_lvds.h"
     60 #include "display/intel_sdvo.h"
     61 #include "display/intel_tv.h"
     62 #include "display/intel_vdsc.h"
     63 
     64 #include "gt/intel_rps.h"
     65 
     66 #include "i915_drv.h"
     67 #include "i915_trace.h"
     68 #include "intel_acpi.h"
     69 #include "intel_atomic.h"
     70 #include "intel_atomic_plane.h"
     71 #include "intel_bw.h"
     72 #include "intel_cdclk.h"
     73 #include "intel_color.h"
     74 #include "intel_display_types.h"
     75 #include "intel_dp_link_training.h"
     76 #include "intel_fbc.h"
     77 #include "intel_fbdev.h"
     78 #include "intel_fifo_underrun.h"
     79 #include "intel_frontbuffer.h"
     80 #include "intel_hdcp.h"
     81 #include "intel_hotplug.h"
     82 #include "intel_overlay.h"
     83 #include "intel_pipe_crc.h"
     84 #include "intel_pm.h"
     85 #include "intel_psr.h"
     86 #include "intel_quirks.h"
     87 #include "intel_sideband.h"
     88 #include "intel_sprite.h"
     89 #include "intel_tc.h"
     90 #include "intel_vga.h"
     91 
/* Primary plane formats for gen <= 3 */
/* Pixel formats the pre-gen4 primary plane can scan out. */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};
     99 
/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	/* no DRM_FORMAT_XBGR16161616F here, unlike i965_primary_formats */
};
    109 
/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
    120 
/* Primary plane formats for vlv/chv */
/* Superset of i965_primary_formats: vlv/chv also take alpha formats. */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
    135 
/* Tiling modifiers for i9xx planes; DRM_FORMAT_MOD_INVALID terminates. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
    141 
/* Cursor formats */
/* The hardware cursor plane only accepts 32bpp ARGB. */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
    146 
/* Cursor planes are linear only; DRM_FORMAT_MOD_INVALID terminates. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
    151 
    152 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
    153 				struct intel_crtc_state *pipe_config);
    154 static void ilk_pch_clock_get(struct intel_crtc *crtc,
    155 			      struct intel_crtc_state *pipe_config);
    156 
    157 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
    158 				  struct drm_i915_gem_object *obj,
    159 				  struct drm_mode_fb_cmd2 *mode_cmd);
    160 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
    161 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
    162 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
    163 					 const struct intel_link_m_n *m_n,
    164 					 const struct intel_link_m_n *m2_n2);
    165 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
    166 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
    167 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
    168 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
    169 static void vlv_prepare_pll(struct intel_crtc *crtc,
    170 			    const struct intel_crtc_state *pipe_config);
    171 static void chv_prepare_pll(struct intel_crtc *crtc,
    172 			    const struct intel_crtc_state *pipe_config);
    173 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
    174 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
    175 static void intel_modeset_setup_hw_state(struct drm_device *dev,
    176 					 struct drm_modeset_acquire_ctx *ctx);
    177 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
    178 
/*
 * Per-platform legal ranges for the DPLL divisors and derived clocks,
 * consumed by the *_find_best_dpll() search routines below.
 */
struct intel_limit {
	struct {
		int min, max;	/* inclusive range */
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;		/* dot clock threshold for p2 choice */
		int p2_slow, p2_fast;	/* p2 used below/above dot_limit */
	} p2;
};
    189 
    190 /* returns HPLL frequency in kHz */
    191 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
    192 {
    193 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
    194 
    195 	/* Obtain SKU information */
    196 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
    197 		CCK_FUSE_HPLL_FREQ_MASK;
    198 
    199 	return vco_freq[hpll_freq] * 1000;
    200 }
    201 
    202 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
    203 		      const char *name, u32 reg, int ref_freq)
    204 {
    205 	u32 val;
    206 	int divider;
    207 
    208 	val = vlv_cck_read(dev_priv, reg);
    209 	divider = val & CCK_FREQUENCY_VALUES;
    210 
    211 	WARN((val & CCK_FREQUENCY_STATUS) !=
    212 	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
    213 	     "%s change in progress\n", name);
    214 
    215 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
    216 }
    217 
    218 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
    219 			   const char *name, u32 reg)
    220 {
    221 	int hpll;
    222 
    223 	vlv_cck_get(dev_priv);
    224 
    225 	if (dev_priv->hpll_freq == 0)
    226 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
    227 
    228 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
    229 
    230 	vlv_cck_put(dev_priv);
    231 
    232 	return hpll;
    233 }
    234 
    235 static void intel_update_czclk(struct drm_i915_private *dev_priv)
    236 {
    237 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
    238 		return;
    239 
    240 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
    241 						      CCK_CZ_CLOCK_CONTROL);
    242 
    243 	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
    244 }
    245 
    246 static inline u32 /* units of 100MHz */
    247 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
    248 		    const struct intel_crtc_state *pipe_config)
    249 {
    250 	if (HAS_DDI(dev_priv))
    251 		return pipe_config->port_clock; /* SPLL */
    252 	else
    253 		return dev_priv->fdi_pll_freq;
    254 }
    255 
/* 8xx DPLL divisor limits for DAC (CRT) outputs. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};
    268 
/* 8xx DPLL divisor limits for DVO outputs (same as DAC except p2_fast). */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};
    281 
/* 8xx DPLL divisor limits for LVDS panels. */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
    294 
/* 9xx DPLL divisor limits for SDVO/DAC outputs. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
    307 
/* 9xx DPLL divisor limits for LVDS panels. */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
    320 
    321 
/* G4x DPLL divisor limits for SDVO outputs. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};
    336 
/* G4x DPLL divisor limits for HDMI/DAC outputs. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
    349 
/* G4x DPLL divisor limits for single-channel LVDS (fixed p2). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};
    363 
/* G4x DPLL divisor limits for dual-channel LVDS (fixed p2). */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
    377 
/* Pineview DPLL divisor limits for SDVO/DAC outputs. */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
    392 
/* Pineview DPLL divisor limits for LVDS (m1 unused, see pnv_limits_sdvo). */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
    405 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* ILK/SNB DPLL divisor limits for DAC (CRT) outputs. */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};
    423 
/* ILK/SNB DPLL divisor limits for single-channel LVDS (120MHz refclk). */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
    436 
/* ILK/SNB DPLL divisor limits for dual-channel LVDS (120MHz refclk). */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
    449 
/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
    463 
/* ILK/SNB dual-channel LVDS limits with 100MHz refclk. */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
    476 
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
    492 
static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on CHV, hence the << 22. */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};
    508 
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	/* m2 uses the same 22-bit fractional encoding as CHV. */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
    520 
    521 /* WA Display #0827: Gen9:all */
    522 static void
    523 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
    524 {
    525 	if (enable)
    526 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    527 			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
    528 			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
    529 	else
    530 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    531 			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
    532 			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
    533 }
    534 
    535 /* Wa_2006604312:icl */
    536 static void
    537 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
    538 		       bool enable)
    539 {
    540 	if (enable)
    541 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    542 			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
    543 	else
    544 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
    545 			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
    546 }
    547 
    548 static bool
    549 needs_modeset(const struct intel_crtc_state *state)
    550 {
    551 	return drm_atomic_crtc_needs_modeset(&state->uapi);
    552 }
    553 
    554 bool
    555 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
    556 {
    557 	return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
    558 		crtc_state->sync_mode_slaves_mask);
    559 }
    560 
    561 static bool
    562 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
    563 {
    564 	return crtc_state->master_transcoder != INVALID_TRANSCODER;
    565 }
    566 
    567 /*
    568  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
    569  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
    570  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
    571  * The helpers' return value is the rate of the clock that is fed to the
    572  * display engine's pipe which can be the above fast dot clock rate or a
    573  * divided-down version of it.
    574  */
    575 /* m1 is reserved as 0 in Pineview, n is a ring counter */
    576 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
    577 {
    578 	clock->m = clock->m2 + 2;
    579 	clock->p = clock->p1 * clock->p2;
    580 	if (WARN_ON(clock->n == 0 || clock->p == 0))
    581 		return 0;
    582 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
    583 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    584 
    585 	return clock->dot;
    586 }
    587 
    588 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
    589 {
    590 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
    591 }
    592 
    593 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
    594 {
    595 	clock->m = i9xx_dpll_compute_m(clock);
    596 	clock->p = clock->p1 * clock->p2;
    597 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
    598 		return 0;
    599 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
    600 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    601 
    602 	return clock->dot;
    603 }
    604 
    605 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
    606 {
    607 	clock->m = clock->m1 * clock->m2;
    608 	clock->p = clock->p1 * clock->p2;
    609 	if (WARN_ON(clock->n == 0 || clock->p == 0))
    610 		return 0;
    611 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
    612 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    613 
    614 	return clock->dot / 5;
    615 }
    616 
    617 int chv_calc_dpll_params(int refclk, struct dpll *clock)
    618 {
    619 	clock->m = clock->m1 * clock->m2;
    620 	clock->p = clock->p1 * clock->p2;
    621 	if (WARN_ON(clock->n == 0 || clock->p == 0))
    622 		return 0;
    623 	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
    624 					   clock->n << 22);
    625 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
    626 
    627 	return clock->dot / 5;
    628 }
    629 
/*
 * Reject the current divisor candidate: returns false from the enclosing
 * intel_PLL_is_valid().  The debug print is compiled out to avoid log spam
 * in the hot divisor-search loops.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
    631 
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 *
 * Note: INTELPllInvalid() expands to "return false", so each failing
 * check below exits immediately.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is required except where m1 is unused or combined (PNV)
	 * or the divisors have different semantics (VLV/CHV/BXT). */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't populate .m/.p, so skip those. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
    672 
    673 static int
    674 i9xx_select_p2_div(const struct intel_limit *limit,
    675 		   const struct intel_crtc_state *crtc_state,
    676 		   int target)
    677 {
    678 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
    679 
    680 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
    681 		/*
    682 		 * For LVDS just rely on its current settings for dual-channel.
    683 		 * We haven't figured out how to reliably set up different
    684 		 * single/dual channel state, if we even can.
    685 		 */
    686 		if (intel_is_dual_link_lvds(dev_priv))
    687 			return limit->p2.p2_fast;
    688 		else
    689 			return limit->p2.p2_slow;
    690 	} else {
    691 		if (target < limit->p2.dot_limit)
    692 			return limit->p2.p2_slow;
    693 		else
    694 			return limit->p2.p2_fast;
    695 	}
    696 }
    697 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	/* Best error so far; initialized so any |dot - target| < target wins. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over m1/m2/n/p1; the strict '<' comparison
	 * below keeps the earliest candidate among equal errors. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 > m2 is required (see intel_PLL_is_valid),
			 * so prune the rest of this m2 range early. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: keep same P. */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* Found something iff we improved on the initial error bound. */
	return (err != target);
}
    755 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Same search as i9xx_find_best_dpll() but uses pnv_calc_dpll_params()
 * and imposes no m1 > m2 constraint (Pineview keeps m1 at 0).
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	/* Best error so far; initialized so any |dot - target| < target wins. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: keep same P. */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* Found something iff we improved on the initial error bound. */
	return (err != target);
}
    811 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Note: @match_clock is accepted for signature parity with the other
 * finders but is not consulted here.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Shrinking max_n pins later
						 * iterations to n values no
						 * larger than the current
						 * best candidate's. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
    870 
    871 /*
    872  * Check if the calculated PLL configuration is more optimal compared to the
    873  * best configuration and error found so far. Return the calculated error.
    874  */
    875 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
    876 			       const struct dpll *calculated_clock,
    877 			       const struct dpll *best_clock,
    878 			       unsigned int best_error_ppm,
    879 			       unsigned int *error_ppm)
    880 {
    881 	/*
    882 	 * For CHV ignore the error and consider only the P value.
    883 	 * Prefer a bigger P value based on HW requirements.
    884 	 */
    885 	if (IS_CHERRYVIEW(to_i915(dev))) {
    886 		*error_ppm = 0;
    887 
    888 		return calculated_clock->p > best_clock->p;
    889 	}
    890 
    891 	if (WARN_ON_ONCE(!target_freq))
    892 		return false;
    893 
    894 	*error_ppm = div_u64(1000000ULL *
    895 				abs(target_freq - calculated_clock->dot),
    896 			     target_freq);
    897 	/*
    898 	 * Prefer a better P value over a better (smaller) error if the error
    899 	 * is small. Ensure this preference for future configurations too by
    900 	 * setting the error to 0.
    901 	 */
    902 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
    903 		*error_ppm = 0;
    904 
    905 		return true;
    906 	}
    907 
    908 	return *error_ppm + 10 < best_error_ppm;
    909 }
    910 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * NOTE(review): match_clock is accepted for signature parity with the
 * other find_best_dpll variants but is not consulted in this one.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 from the clock equation for this n/p/m1 choice */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
    970 
    971 /*
    972  * Returns a set of divisors for the desired target clock with the given
    973  * refclk, or FALSE.  The returned values represent the clock equation:
    974  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
    975  */
    976 static bool
    977 chv_find_best_dpll(const struct intel_limit *limit,
    978 		   struct intel_crtc_state *crtc_state,
    979 		   int target, int refclk, struct dpll *match_clock,
    980 		   struct dpll *best_clock)
    981 {
    982 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
    983 	struct drm_device *dev = crtc->base.dev;
    984 	unsigned int best_error_ppm;
    985 	struct dpll clock;
    986 	u64 m2;
    987 	int found = false;
    988 
    989 	memset(best_clock, 0, sizeof(*best_clock));
    990 	best_error_ppm = 1000000;
    991 
    992 	/*
    993 	 * Based on hardware doc, the n always set to 1, and m1 always
    994 	 * set to 2.  If requires to support 200Mhz refclk, we need to
    995 	 * revisit this because n may not 1 anymore.
    996 	 */
    997 	clock.n = 1, clock.m1 = 2;
    998 	target *= 5;	/* fast clock */
    999 
   1000 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
   1001 		for (clock.p2 = limit->p2.p2_fast;
   1002 				clock.p2 >= limit->p2.p2_slow;
   1003 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
   1004 			unsigned int error_ppm;
   1005 
   1006 			clock.p = clock.p1 * clock.p2;
   1007 
   1008 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
   1009 						   refclk * clock.m1);
   1010 
   1011 			if (m2 > INT_MAX/clock.m1)
   1012 				continue;
   1013 
   1014 			clock.m2 = m2;
   1015 
   1016 			chv_calc_dpll_params(refclk, &clock);
   1017 
   1018 			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
   1019 				continue;
   1020 
   1021 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
   1022 						best_error_ppm, &error_ppm))
   1023 				continue;
   1024 
   1025 			*best_clock = clock;
   1026 			best_error_ppm = error_ppm;
   1027 			found = true;
   1028 		}
   1029 	}
   1030 
   1031 	return found;
   1032 }
   1033 
   1034 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
   1035 			struct dpll *best_clock)
   1036 {
   1037 	int refclk = 100000;
   1038 	const struct intel_limit *limit = &intel_limits_bxt;
   1039 
   1040 	return chv_find_best_dpll(limit, crtc_state,
   1041 				  crtc_state->port_clock, refclk,
   1042 				  NULL, best_clock);
   1043 }
   1044 
/*
 * Detect whether the pipe's scanline counter is advancing by sampling
 * PIPEDSL twice, 5 ms apart.  Returns true if the two samples differ.
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* gen2 has a narrower scanline field in PIPEDSL */
	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
   1063 
/*
 * Poll (up to 100 ms) until the pipe's scanline counter matches the
 * requested state: moving (state == true) or stopped (state == false).
 * Logs an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
   1074 
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
   1084 
/*
 * Wait for a pipe to fully turn off after it has been disabled.
 * Gen4+ exposes a PIPECONF active bit to poll; older hardware is
 * checked indirectly by waiting for the scanline counter to stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
   1103 
/* Only for pre-ILK configs */
/*
 * State-checker: warn if the pipe's DPLL enable bit does not match the
 * expected @state.
 */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
   1117 
/* XXX: the dsi pll is shared between MIPI DSI ports */
/*
 * State-checker: warn if the DSI PLL enable bit (read via the CCK
 * sideband) does not match the expected @state.
 */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
   1133 
/*
 * State-checker: warn if the FDI transmitter for @pipe is not in the
 * expected @state.  On DDI platforms the transcoder DDI function enable
 * bit is used as a proxy, since there is no dedicated FDI_TX register.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

/*
 * State-checker: warn if the FDI receiver for @pipe is not in the
 * expected @state.
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
   1174 
/*
 * State-checker: warn if the FDI TX PLL for @pipe is disabled when it
 * should be active.  Skipped on platforms where the PLL is always on
 * (ILK) or managed elsewhere (DDI).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
   1191 
/*
 * State-checker: warn if the FDI RX PLL for @pipe does not match the
 * expected @state.
 */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
   1204 
/*
 * State-checker: warn if the panel power sequencer registers driving
 * @pipe are write-locked.  First resolves which pipe the panel is
 * actually attached to (platform-dependent port-select logic), then
 * checks the corresponding PP_CONTROL lock bits.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms manage the panel sequencer differently; not handled here */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* map the panel port selection to the pipe feeding that port */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* pre-PCH-split hardware only supports the panel on LVDS */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* panel off, or regs explicitly unlocked => not locked */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
   1261 
/*
 * State-checker: warn if the transcoder's PIPECONF enable bit does not
 * match the expected @state.  If the transcoder's power domain cannot
 * be acquired, the pipe is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power domain off => register unreadable, report disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
   1289 
/*
 * State-checker: warn if the plane's hardware enable state does not
 * match the expected @state.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
   1304 
/* State-checker: warn if any plane attached to @crtc is still enabled. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
   1313 
/*
 * State-checker: warn if vblank interrupts are still enabled for @crtc.
 * drm_crtc_vblank_get() returning 0 means vblanks were enabled, so the
 * reference taken by the check is dropped again immediately.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
   1319 
/* State-checker: warn if the PCH transcoder for @pipe is still enabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
   1332 
/*
 * State-checker: warn if the PCH DP port is enabled on @pipe, and (on
 * IBX) if a disabled port is still parked on transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
   1350 
/*
 * State-checker: warn if the PCH HDMI/SDVO port is enabled on @pipe,
 * and (on IBX) if a disabled port is still parked on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
   1368 
/*
 * State-checker: warn if any PCH output port (DP, VGA/CRT, LVDS,
 * HDMI/SDVO) is still enabled on @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
   1393 
/*
 * Low-level VLV PLL enable: write the DPLL control value, allow the
 * hardware to settle, then poll for PLL lock.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
   1407 
/*
 * Enable the VLV DPLL for @crtc per @pipe_config: sanity-check
 * preconditions, enable the VCO if requested, then program DPLL_MD.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
   1425 
   1426 
/*
 * Low-level CHV PLL enable: re-enable the 10-bit clock via the DPIO
 * sideband, honor the mandatory >100ns settle time, enable the PLL and
 * poll for lock.  The statement order here follows a hardware-mandated
 * sequence; do not reorder.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
   1456 
/*
 * Enable the CHV DPLL for @crtc per @pipe_config.  Pipes B/C need the
 * WaPixelRepeatModeFixForC0 chicken-bit dance to program DPLL_MD; pipe
 * A can write its DPLL_MD register directly.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* cache the value since DPLL_MD(pipe) can't be read back for B/C */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
   1493 
   1494 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
   1495 {
   1496 	if (IS_I830(dev_priv))
   1497 		return false;
   1498 
   1499 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
   1500 }
   1501 
/*
 * Enable the i9xx-style DPLL for @crtc.  The write sequence (VGA-mode
 * priming, stabilization delays, triple rewrite) is hardware-mandated;
 * do not reorder or collapse the writes.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
   1547 
/*
 * Disable the i9xx-style DPLL for the crtc in @crtc_state, leaving only
 * VGA mode disable set.  No-op on I830, which keeps its PLLs running.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
   1564 
/*
 * Disable the VLV DPLL for @pipe while keeping the reference clock
 * (and, for pipes B/C, the CRI clock) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
   1580 
/*
 * Disable the CHV DPLL for @pipe: drop to the SSC reference clock, then
 * turn off the 10-bit clock to the display controller via DPIO.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
   1606 
/*
 * Wait (up to 1s) for the PHY to report @dport ready.  Which status
 * register and bit-field to poll depends on the port; port C's ready
 * bits sit 4 bits above port B's in the same register, hence the shift
 * of @expected_mask.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
   1638 
/*
 * Enable the ILK-style PCH transcoder for the crtc in @crtc_state:
 * verify the PCH DPLL and FDI link are up, apply the CPT timing
 * workaround, mirror the CPU pipe's BPC/interlace configuration into
 * the transcoder, then enable it and wait for confirmation.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
   1704 
/*
 * Enable the single LPT PCH transcoder feeding @cpu_transcoder: apply
 * the timing-override workaround, copy the interlace mode from the CPU
 * pipe, then enable the transcoder and wait for confirmation.  LPT
 * always routes through FDI RX on pipe A.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
   1736 
/*
 * Disable the PCH transcoder for the given pipe and wait for it to report
 * off.  FDI and the PCH ports must already be disabled.  On CPT the timing
 * override chicken bit is cleared again afterwards.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
   1766 
/*
 * Disable the single LPT PCH transcoder, wait for it to report off, then
 * clear the timing override workaround bit that was set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
   1784 
   1785 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
   1786 {
   1787 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1788 
   1789 	if (HAS_PCH_LPT(dev_priv))
   1790 		return PIPE_A;
   1791 	else
   1792 		return crtc->pipe;
   1793 }
   1794 
   1795 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
   1796 {
   1797 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1798 
   1799 	/*
   1800 	 * On i965gm the hardware frame counter reads
   1801 	 * zero when the TV encoder is enabled :(
   1802 	 */
   1803 	if (IS_I965GM(dev_priv) &&
   1804 	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
   1805 		return 0;
   1806 
   1807 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
   1808 		return 0xffffffff; /* full 32 bit counter */
   1809 	else if (INTEL_GEN(dev_priv) >= 3)
   1810 		return 0xffffff; /* only 24 bits of frame count */
   1811 	else
   1812 		return 0; /* Gen2 doesn't have a hardware frame counter */
   1813 }
   1814 
/*
 * Hand vblank handling for this CRTC over to drm_vblank: program the
 * platform-appropriate max hardware frame counter value and enable vblank
 * processing.  Vblanks must currently be disabled.
 */
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
   1824 
/*
 * Disable drm_vblank processing for this CRTC and verify it actually ended
 * up disabled.
 */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
   1832 
/*
 * Enable the pipe (PIPECONF) for the given new CRTC state.  All planes must
 * already be disabled, and the clock source feeding the pipe (DSI PLL or
 * DPLL on GMCH platforms, FDI PLLs for PCH encoders) must be running.  On
 * i830 both pipes are kept enabled at all times, so finding the pipe
 * already enabled there is not an error.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
   1890 
/*
 * Disable the pipe (PIPECONF) for the given old CRTC state.  The planes
 * feeding the pipe must already be disabled.  On i830 the pipe is left
 * enabled (both pipes stay on permanently), in which case we also skip
 * waiting for the pipe to stop.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
   1930 
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	/* Gen2 uses 2k tiles, everything newer uses 4k. */
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
   1935 
   1936 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
   1937 {
   1938 	if (!is_ccs_modifier(fb->modifier))
   1939 		return false;
   1940 
   1941 	return plane >= fb->format->num_planes / 2;
   1942 }
   1943 
   1944 static bool is_gen12_ccs_modifier(u64 modifier)
   1945 {
   1946 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
   1947 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
   1948 
   1949 }
   1950 
   1951 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
   1952 {
   1953 	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
   1954 }
   1955 
   1956 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
   1957 {
   1958 	if (is_ccs_modifier(fb->modifier))
   1959 		return is_ccs_plane(fb, plane);
   1960 
   1961 	return plane == 1;
   1962 }
   1963 
   1964 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
   1965 {
   1966 	WARN_ON(!is_ccs_modifier(fb->modifier) ||
   1967 		(main_plane && main_plane >= fb->format->num_planes / 2));
   1968 
   1969 	return fb->format->num_planes / 2 + main_plane;
   1970 }
   1971 
   1972 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
   1973 {
   1974 	WARN_ON(!is_ccs_modifier(fb->modifier) ||
   1975 		ccs_plane < fb->format->num_planes / 2);
   1976 
   1977 	return ccs_plane - fb->format->num_planes / 2;
   1978 }
   1979 
   1980 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
   1981 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
   1982 {
   1983 	if (is_ccs_modifier(fb->modifier))
   1984 		return main_to_ccs_plane(fb, main_plane);
   1985 
   1986 	return 1;
   1987 }
   1988 
   1989 bool
   1990 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
   1991 				    uint64_t modifier)
   1992 {
   1993 	return info->is_yuv &&
   1994 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
   1995 }
   1996 
   1997 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
   1998 				   int color_plane)
   1999 {
   2000 	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
   2001 	       color_plane == 1;
   2002 }
   2003 
/*
 * Return the tile width in bytes for the given fb plane, determined by the
 * tiling modifier, the hardware generation and (for Yf tiling) the bytes
 * per pixel.  Linear surfaces report the full tile size as their "width",
 * which makes the derived tile height 1 (see intel_tile_height()).  CCS
 * planes use narrower tiles: 128 bytes on skl-style CCS, 64 on gen12 CCS.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
   2056 
   2057 static unsigned int
   2058 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
   2059 {
   2060 	if (is_gen12_ccs_plane(fb, color_plane))
   2061 		return 1;
   2062 
   2063 	return intel_tile_size(to_i915(fb->dev)) /
   2064 		intel_tile_width_bytes(fb, color_plane);
   2065 }
   2066 
   2067 /* Return the tile dimensions in pixel units */
   2068 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
   2069 			    unsigned int *tile_width,
   2070 			    unsigned int *tile_height)
   2071 {
   2072 	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
   2073 	unsigned int cpp = fb->format->cpp[color_plane];
   2074 
   2075 	*tile_width = tile_width_bytes / cpp;
   2076 	*tile_height = intel_tile_height(fb, color_plane);
   2077 }
   2078 
   2079 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
   2080 					int color_plane)
   2081 {
   2082 	unsigned int tile_width, tile_height;
   2083 
   2084 	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
   2085 
   2086 	return fb->pitches[color_plane] * tile_height;
   2087 }
   2088 
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	/* Round the height up to a whole number of tile rows. */
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
   2097 
   2098 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
   2099 {
   2100 	unsigned int size = 0;
   2101 	int i;
   2102 
   2103 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
   2104 		size += rot_info->plane[i].width * rot_info->plane[i].height;
   2105 
   2106 	return size;
   2107 }
   2108 
   2109 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
   2110 {
   2111 	unsigned int size = 0;
   2112 	int i;
   2113 
   2114 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
   2115 		size += rem_info->plane[i].width * rem_info->plane[i].height;
   2116 
   2117 	return size;
   2118 }
   2119 
   2120 static void
   2121 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
   2122 			const struct drm_framebuffer *fb,
   2123 			unsigned int rotation)
   2124 {
   2125 	view->type = I915_GGTT_VIEW_NORMAL;
   2126 	if (drm_rotation_90_or_270(rotation)) {
   2127 		view->type = I915_GGTT_VIEW_ROTATED;
   2128 		view->rotated = to_intel_framebuffer(fb)->rot_info;
   2129 	}
   2130 }
   2131 
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	/* A few early platforms have special cursor alignment needs. */
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
   2143 
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	/* Linear surface alignment grows with the hardware generation. */
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
   2156 
/*
 * Return the required GGTT alignment in bytes for the given fb plane.
 * Pre-gen12 AUX planes and all CCS planes only need 4K; otherwise the
 * alignment depends on the tiling modifier, and on gen12 semiplanar UV
 * planes additionally need tile-row alignment.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
   2194 
   2195 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
   2196 {
   2197 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   2198 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   2199 
   2200 	return INTEL_GEN(dev_priv) < 4 ||
   2201 		(plane->has_fbc &&
   2202 		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
   2203 }
   2204 
/*
 * Pin a framebuffer object into the GGTT for scanout and, when requested
 * and possible, install a fence register for it.
 *
 * Returns a referenced vma (to be released with intel_unpin_fb_vma()) or
 * an ERR_PTR on failure.  PLANE_HAS_FENCE is OR'ed into *out_flags when a
 * fence was actually installed.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment of plane 0 governs the whole surface placement. */
	alignment = intel_surf_alignment(fb, 0);
	if (WARN_ON(alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		/* Pre-gen4 cannot scan out without a fence: hard failure. */
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Success: hand the caller a reference on the vma. */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
   2299 
/*
 * Release a vma pinned by intel_pin_and_fence_fb_obj(): drop the fence if
 * PLANE_HAS_FENCE is set in @flags, unpin from the display plane, and drop
 * the reference taken at pin time.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
   2310 
   2311 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
   2312 			  unsigned int rotation)
   2313 {
   2314 	if (drm_rotation_90_or_270(rotation))
   2315 		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
   2316 	else
   2317 		return fb->pitches[color_plane];
   2318 }
   2319 
   2320 /*
   2321  * Convert the x/y offsets into a linear offset.
   2322  * Only valid with 0/180 degree rotation, which is fine since linear
   2323  * offset is only used with linear buffers on pre-hsw and tiled buffers
   2324  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
   2325  */
   2326 u32 intel_fb_xy_to_linear(int x, int y,
   2327 			  const struct intel_plane_state *state,
   2328 			  int color_plane)
   2329 {
   2330 	const struct drm_framebuffer *fb = state->hw.fb;
   2331 	unsigned int cpp = fb->format->cpp[color_plane];
   2332 	unsigned int pitch = state->color_plane[color_plane].stride;
   2333 
   2334 	return y * pitch + x * cpp;
   2335 }
   2336 
   2337 /*
   2338  * Add the x/y offsets derived from fb->offsets[] to the user
   2339  * specified plane src x/y offsets. The resulting x/y offsets
   2340  * specify the start of scanout from the beginning of the gtt mapping.
   2341  */
   2342 void intel_add_fb_offsets(int *x, int *y,
   2343 			  const struct intel_plane_state *state,
   2344 			  int color_plane)
   2345 
   2346 {
   2347 	*x += state->color_plane[color_plane].x;
   2348 	*y += state->color_plane[color_plane].y;
   2349 }
   2350 
/*
 * Convert the byte-offset difference (old_offset - new_offset) into an x/y
 * adjustment so that scanning out from new_offset with the adjusted x/y
 * hits the same pixels.  Both offsets must be tile-size aligned and
 * new_offset must not exceed old_offset.  Afterwards x is minimized by
 * folding whole pitch-widths into y.  Returns new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	/* Number of whole tiles between the two offsets. */
	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
   2377 
   2378 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
   2379 {
   2380 	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
   2381 	       is_gen12_ccs_plane(fb, color_plane);
   2382 }
   2383 
/*
 * Adjust the fb x/y offsets to compensate for moving the surface base from
 * old_offset to new_offset.  Tiled surfaces are adjusted in whole tiles
 * via intel_adjust_tile_offset(); linear surfaces use plain pitch/cpp
 * arithmetic.  Returns new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile rows, not bytes. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the byte delta directly into x/y. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
   2422 
   2423 /*
   2424  * Adjust the tile offset by moving the difference into
   2425  * the x/y offsets.
   2426  */
   2427 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
   2428 					     const struct intel_plane_state *state,
   2429 					     int color_plane,
   2430 					     u32 old_offset, u32 new_offset)
   2431 {
   2432 	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
   2433 					   state->hw.rotation,
   2434 					   state->color_plane[color_plane].stride,
   2435 					   old_offset, new_offset);
   2436 }
   2437 
   2438 /*
   2439  * Computes the aligned offset to the base tile and adjusts
   2440  * x, y. bytes per pixel is assumed to be a power-of-two.
   2441  *
   2442  * In the 90/270 rotated case, x and y are assumed
   2443  * to be already rotated to match the rotated GTT view, and
   2444  * pitch is the tile_height aligned framebuffer height.
   2445  *
   2446  * This function is used when computing the derived information
   2447  * under intel_framebuffer, so using any of that information
   2448  * here is not allowed. Anything under drm_framebuffer can be
   2449  * used. This is why the user has to pass in the pitch since it
   2450  * is specified in the rotated orientation.
   2451  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile rows, not bytes. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles and intra-tile remainders. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* Move the alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			/* Keep the misaligned remainder in x/y. */
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
   2506 
   2507 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
   2508 					      const struct intel_plane_state *state,
   2509 					      int color_plane)
   2510 {
   2511 	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
   2512 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
   2513 	const struct drm_framebuffer *fb = state->hw.fb;
   2514 	unsigned int rotation = state->hw.rotation;
   2515 	int pitch = state->color_plane[color_plane].stride;
   2516 	u32 alignment;
   2517 
   2518 	if (intel_plane->id == PLANE_CURSOR)
   2519 		alignment = intel_cursor_alignment(dev_priv);
   2520 	else
   2521 		alignment = intel_surf_alignment(fb, color_plane);
   2522 
   2523 	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
   2524 					    pitch, rotation, alignment);
   2525 }
   2526 
   2527 /* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/* Minimum alignment fb->offsets[] must honor for this plane. */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Convert the byte offset into x/y relative to offset 0. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
   2572 
   2573 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
   2574 {
   2575 	switch (fb_modifier) {
   2576 	case I915_FORMAT_MOD_X_TILED:
   2577 		return I915_TILING_X;
   2578 	case I915_FORMAT_MOD_Y_TILED:
   2579 	case I915_FORMAT_MOD_Y_TILED_CCS:
   2580 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   2581 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   2582 		return I915_TILING_Y;
   2583 	default:
   2584 		return I915_TILING_NONE;
   2585 	}
   2586 }
   2587 
   2588 /*
   2589  * From the Sky Lake PRM:
   2590  * "The Color Control Surface (CCS) contains the compression status of
   2591  *  the cache-line pairs. The compression state of the cache-line pair
   2592  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
   2593  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
   2594  *  cache-line-pairs. CCS is always Y tiled."
   2595  *
   2596  * Since cache line pairs refers to horizontally adjacent cache lines,
   2597  * each cache line in the CCS corresponds to an area of 32x16 cache
   2598  * lines on the main surface. Since each pixel is 4 bytes, this gives
   2599  * us a ratio of one byte in the CCS for each 8x16 pixels in the
   2600  * main surface.
   2601  */
/* Pixel formats supported with skl-style render compression (see above). */
static const struct drm_format_info skl_ccs_formats[] = {
	/* cpp: 4 bytes/px main plane, 1 byte CCS per 8x16 px (hsub x vsub). */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
   2612 
   2613 /*
   2614  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
   2615  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
   2616  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
   2617  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
   2618  * the main surface.
   2619  */
   2620 static const struct drm_format_info gen12_ccs_formats[] = {
   2621 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
   2622 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2623 	  .hsub = 1, .vsub = 1, },
   2624 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
   2625 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2626 	  .hsub = 1, .vsub = 1, },
   2627 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
   2628 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2629 	  .hsub = 1, .vsub = 1, .has_alpha = true },
   2630 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
   2631 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2632 	  .hsub = 1, .vsub = 1, .has_alpha = true },
   2633 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
   2634 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2635 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2636 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
   2637 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2638 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2639 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
   2640 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2641 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2642 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
   2643 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
   2644 	  .hsub = 2, .vsub = 1, .is_yuv = true },
   2645 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
   2646 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
   2647 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2648 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
   2649 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
   2650 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2651 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
   2652 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
   2653 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2654 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
   2655 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
   2656 	  .hsub = 2, .vsub = 2, .is_yuv = true },
   2657 };
   2658 
   2659 static const struct drm_format_info *
   2660 lookup_format_info(const struct drm_format_info formats[],
   2661 		   int num_formats, u32 format)
   2662 {
   2663 	int i;
   2664 
   2665 	for (i = 0; i < num_formats; i++) {
   2666 		if (formats[i].format == format)
   2667 			return &formats[i];
   2668 	}
   2669 
   2670 	return NULL;
   2671 }
   2672 
   2673 static const struct drm_format_info *
   2674 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
   2675 {
   2676 	switch (cmd->modifier[0]) {
   2677 	case I915_FORMAT_MOD_Y_TILED_CCS:
   2678 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   2679 		return lookup_format_info(skl_ccs_formats,
   2680 					  ARRAY_SIZE(skl_ccs_formats),
   2681 					  cmd->pixel_format);
   2682 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   2683 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   2684 		return lookup_format_info(gen12_ccs_formats,
   2685 					  ARRAY_SIZE(gen12_ccs_formats),
   2686 					  cmd->pixel_format);
   2687 	default:
   2688 		return NULL;
   2689 	}
   2690 }
   2691 
   2692 bool is_ccs_modifier(u64 modifier)
   2693 {
   2694 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
   2695 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
   2696 	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
   2697 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
   2698 }
   2699 
   2700 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
   2701 {
   2702 	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
   2703 			    512) * 64;
   2704 }
   2705 
   2706 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
   2707 			      u32 pixel_format, u64 modifier)
   2708 {
   2709 	struct intel_crtc *crtc;
   2710 	struct intel_plane *plane;
   2711 
   2712 	/*
   2713 	 * We assume the primary plane for pipe A has
   2714 	 * the highest stride limits of them all.
   2715 	 */
   2716 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
   2717 	if (!crtc)
   2718 		return 0;
   2719 
   2720 	plane = to_intel_plane(crtc->base.primary);
   2721 
   2722 	return plane->max_stride(plane, pixel_format, modifier,
   2723 				 DRM_MODE_ROTATE_0);
   2724 }
   2725 
   2726 static
   2727 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
   2728 			u32 pixel_format, u64 modifier)
   2729 {
   2730 	/*
   2731 	 * Arbitrary limit for gen4+ chosen to match the
   2732 	 * render engine max stride.
   2733 	 *
   2734 	 * The new CCS hash mode makes remapping impossible
   2735 	 */
   2736 	if (!is_ccs_modifier(modifier)) {
   2737 		if (INTEL_GEN(dev_priv) >= 7)
   2738 			return 256*1024;
   2739 		else if (INTEL_GEN(dev_priv) >= 4)
   2740 			return 128*1024;
   2741 	}
   2742 
   2743 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
   2744 }
   2745 
/*
 * Return the required stride alignment (in bytes) for @color_plane of
 * @fb, accounting for linear remapping requirements and CCS stride
 * workarounds/padding rules.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
   2790 
   2791 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
   2792 {
   2793 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   2794 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   2795 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   2796 	int i;
   2797 
   2798 	/* We don't want to deal with remapping with cursors */
   2799 	if (plane->id == PLANE_CURSOR)
   2800 		return false;
   2801 
   2802 	/*
   2803 	 * The display engine limits already match/exceed the
   2804 	 * render engine limits, so not much point in remapping.
   2805 	 * Would also need to deal with the fence POT alignment
   2806 	 * and gen2 2KiB GTT tile size.
   2807 	 */
   2808 	if (INTEL_GEN(dev_priv) < 4)
   2809 		return false;
   2810 
   2811 	/*
   2812 	 * The new CCS hash mode isn't compatible with remapping as
   2813 	 * the virtual address of the pages affects the compressed data.
   2814 	 */
   2815 	if (is_ccs_modifier(fb->modifier))
   2816 		return false;
   2817 
   2818 	/* Linear needs a page aligned stride for remapping */
   2819 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
   2820 		unsigned int alignment = intel_tile_size(dev_priv) - 1;
   2821 
   2822 		for (i = 0; i < fb->format->num_planes; i++) {
   2823 			if (fb->pitches[i] & alignment)
   2824 				return false;
   2825 		}
   2826 	}
   2827 
   2828 	return true;
   2829 }
   2830 
   2831 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
   2832 {
   2833 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   2834 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   2835 	unsigned int rotation = plane_state->hw.rotation;
   2836 	u32 stride, max_stride;
   2837 
   2838 	/*
   2839 	 * No remapping for invisible planes since we don't have
   2840 	 * an actual source viewport to remap.
   2841 	 */
   2842 	if (!plane_state->uapi.visible)
   2843 		return false;
   2844 
   2845 	if (!intel_plane_can_remap(plane_state))
   2846 		return false;
   2847 
   2848 	/*
   2849 	 * FIXME: aux plane limits on gen9+ are
   2850 	 * unclear in Bspec, for now no checking.
   2851 	 */
   2852 	stride = intel_fb_pitch(fb, 0, rotation);
   2853 	max_stride = plane->max_stride(plane, fb->format->format,
   2854 				       fb->modifier, rotation);
   2855 
   2856 	return stride > max_stride;
   2857 }
   2858 
/*
 * Compute the horizontal (@hsub) and vertical (@vsub) subsampling
 * factors of @color_plane relative to plane 0 of @fb.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	/* Plane 0 is by definition not subsampled. */
	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	/*
	 * Gen12 CCS plane: derive the horizontal subsampling from the
	 * block width ratio between the CCS plane and its main plane.
	 */
	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* Gen12 CCS planes use a fixed vertical subsampling factor of 32. */
	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offset of a CCS AUX plane matches
 * that of its main surface plane (the hardware has no separate x/y
 * offset register for CCS).
 *
 * Returns 0 on success (or if @ccs_plane is not a CCS plane at all),
 * -EINVAL if the offsets disagree.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* Nothing to check for non-CCS planes. */
	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dimensions up to main surface coordinates. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
   2943 
   2944 static void
   2945 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
   2946 {
   2947 	int main_plane = is_ccs_plane(fb, color_plane) ?
   2948 			 ccs_to_main_plane(fb, color_plane) : 0;
   2949 	int main_hsub, main_vsub;
   2950 	int hsub, vsub;
   2951 
   2952 	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
   2953 	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
   2954 	*w = fb->width / main_hsub / hsub;
   2955 	*h = fb->height / main_vsub / vsub;
   2956 }
   2957 
   2958 /*
   2959  * Setup the rotated view for an FB plane and return the size the GTT mapping
   2960  * requires for this view.
   2961  */
   2962 static u32
   2963 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
   2964 		  u32 gtt_offset_rotated, int x, int y,
   2965 		  unsigned int width, unsigned int height,
   2966 		  unsigned int tile_size,
   2967 		  unsigned int tile_width, unsigned int tile_height,
   2968 		  struct drm_framebuffer *fb)
   2969 {
   2970 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
   2971 	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
   2972 	unsigned int pitch_tiles;
   2973 	struct drm_rect r;
   2974 
   2975 	/* Y or Yf modifiers required for 90/270 rotation */
   2976 	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
   2977 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
   2978 		return 0;
   2979 
   2980 	if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
   2981 		return 0;
   2982 
   2983 	rot_info->plane[plane] = *plane_info;
   2984 
   2985 	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
   2986 
   2987 	/* rotate the x/y offsets to match the GTT view */
   2988 	drm_rect_init(&r, x, y, width, height);
   2989 	drm_rect_rotate(&r,
   2990 			plane_info->width * tile_width,
   2991 			plane_info->height * tile_height,
   2992 			DRM_MODE_ROTATE_270);
   2993 	x = r.x1;
   2994 	y = r.y1;
   2995 
   2996 	/* rotate the tile dimensions to match the GTT view */
   2997 	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
   2998 	swap(tile_width, tile_height);
   2999 
   3000 	/*
   3001 	 * We only keep the x/y offsets, so push all of the
   3002 	 * gtt offset into the x/y offsets.
   3003 	 */
   3004 	intel_adjust_tile_offset(&x, &y,
   3005 				 tile_width, tile_height,
   3006 				 tile_size, pitch_tiles,
   3007 				 gtt_offset_rotated * tile_size, 0);
   3008 
   3009 	/*
   3010 	 * First pixel of the framebuffer from
   3011 	 * the start of the rotated gtt mapping.
   3012 	 */
   3013 	intel_fb->rotated[plane].x = x;
   3014 	intel_fb->rotated[plane].y = y;
   3015 
   3016 	return plane_info->width * plane_info->height;
   3017 }
   3018 
/*
 * Fill in the per-plane layout metadata of @fb: the normal-view x/y
 * offsets, the rotated-view info for Y/Yf tiled fbs, and validate the
 * offsets/strides against the size of the backing GEM object.
 *
 * Returns 0 on success, or a negative error code if the fb layout is
 * invalid or doesn't fit in the object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the byte offset of this plane into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* CCS AUX plane offsets must agree with the main surface. */
		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear plane: size in tiles, rounded up. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
   3126 
/*
 * Build a remapped (or rotated) GGTT view for @plane_state and fill in
 * the per color plane stride/x/y values under the new view. Also
 * translates (and for 90/270, rotates) the uapi src coordinates so they
 * are relative to the remapped viewport.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point; take the integer part. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS fbs must never reach here; remapping breaks the CCS hash mode. */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		WARN_ON(i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
   3237 
/*
 * Compute the GGTT view and per color plane stride/offset/x/y for
 * @plane_state, remapping the fb through the GTT if the stride limits
 * require (and allow) it.
 *
 * Returns 0 on success, or a negative error code from the stride check.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No fb: nothing to compute. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	/* Normal (non-remapped) path: use the precomputed fb layout. */
	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
   3286 
   3287 static int i9xx_format_to_fourcc(int format)
   3288 {
   3289 	switch (format) {
   3290 	case DISPPLANE_8BPP:
   3291 		return DRM_FORMAT_C8;
   3292 	case DISPPLANE_BGRA555:
   3293 		return DRM_FORMAT_ARGB1555;
   3294 	case DISPPLANE_BGRX555:
   3295 		return DRM_FORMAT_XRGB1555;
   3296 	case DISPPLANE_BGRX565:
   3297 		return DRM_FORMAT_RGB565;
   3298 	default:
   3299 	case DISPPLANE_BGRX888:
   3300 		return DRM_FORMAT_XRGB8888;
   3301 	case DISPPLANE_RGBX888:
   3302 		return DRM_FORMAT_XBGR8888;
   3303 	case DISPPLANE_BGRA888:
   3304 		return DRM_FORMAT_ARGB8888;
   3305 	case DISPPLANE_RGBA888:
   3306 		return DRM_FORMAT_ABGR8888;
   3307 	case DISPPLANE_BGRX101010:
   3308 		return DRM_FORMAT_XRGB2101010;
   3309 	case DISPPLANE_RGBX101010:
   3310 		return DRM_FORMAT_XBGR2101010;
   3311 	case DISPPLANE_BGRA101010:
   3312 		return DRM_FORMAT_ARGB2101010;
   3313 	case DISPPLANE_RGBA101010:
   3314 		return DRM_FORMAT_ABGR2101010;
   3315 	case DISPPLANE_RGBX161616:
   3316 		return DRM_FORMAT_XBGR16161616F;
   3317 	}
   3318 }
   3319 
   3320 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
   3321 {
   3322 	switch (format) {
   3323 	case PLANE_CTL_FORMAT_RGB_565:
   3324 		return DRM_FORMAT_RGB565;
   3325 	case PLANE_CTL_FORMAT_NV12:
   3326 		return DRM_FORMAT_NV12;
   3327 	case PLANE_CTL_FORMAT_P010:
   3328 		return DRM_FORMAT_P010;
   3329 	case PLANE_CTL_FORMAT_P012:
   3330 		return DRM_FORMAT_P012;
   3331 	case PLANE_CTL_FORMAT_P016:
   3332 		return DRM_FORMAT_P016;
   3333 	case PLANE_CTL_FORMAT_Y210:
   3334 		return DRM_FORMAT_Y210;
   3335 	case PLANE_CTL_FORMAT_Y212:
   3336 		return DRM_FORMAT_Y212;
   3337 	case PLANE_CTL_FORMAT_Y216:
   3338 		return DRM_FORMAT_Y216;
   3339 	case PLANE_CTL_FORMAT_Y410:
   3340 		return DRM_FORMAT_XVYU2101010;
   3341 	case PLANE_CTL_FORMAT_Y412:
   3342 		return DRM_FORMAT_XVYU12_16161616;
   3343 	case PLANE_CTL_FORMAT_Y416:
   3344 		return DRM_FORMAT_XVYU16161616;
   3345 	default:
   3346 	case PLANE_CTL_FORMAT_XRGB_8888:
   3347 		if (rgb_order) {
   3348 			if (alpha)
   3349 				return DRM_FORMAT_ABGR8888;
   3350 			else
   3351 				return DRM_FORMAT_XBGR8888;
   3352 		} else {
   3353 			if (alpha)
   3354 				return DRM_FORMAT_ARGB8888;
   3355 			else
   3356 				return DRM_FORMAT_XRGB8888;
   3357 		}
   3358 	case PLANE_CTL_FORMAT_XRGB_2101010:
   3359 		if (rgb_order) {
   3360 			if (alpha)
   3361 				return DRM_FORMAT_ABGR2101010;
   3362 			else
   3363 				return DRM_FORMAT_XBGR2101010;
   3364 		} else {
   3365 			if (alpha)
   3366 				return DRM_FORMAT_ARGB2101010;
   3367 			else
   3368 				return DRM_FORMAT_XRGB2101010;
   3369 		}
   3370 	case PLANE_CTL_FORMAT_XRGB_16161616F:
   3371 		if (rgb_order) {
   3372 			if (alpha)
   3373 				return DRM_FORMAT_ABGR16161616F;
   3374 			else
   3375 				return DRM_FORMAT_XBGR16161616F;
   3376 		} else {
   3377 			if (alpha)
   3378 				return DRM_FORMAT_ARGB16161616F;
   3379 			else
   3380 				return DRM_FORMAT_XRGB16161616F;
   3381 		}
   3382 	}
   3383 }
   3384 
/*
 * Try to wrap the firmware-programmed (pre-allocated stolen memory)
 * scanout buffer described by @plane_config in a GEM object and
 * initialize plane_config->fb with it, so the BIOS framebuffer can be
 * inherited.
 *
 * Returns true on success, false if the buffer can't be reused (zero
 * size, too big, unsupported modifier/tiling, or fb init failure).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Expand [base, base + size) to whole pages. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (IS_ERR(obj))
		return false;

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/*
	 * NOTE(review): obj is put unconditionally, even on success —
	 * intel_framebuffer_init() presumably takes its own reference;
	 * verify against that function.
	 */
	i915_gem_object_put(obj);
	return ret;
}
   3459 
   3460 static void
   3461 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
   3462 			struct intel_plane_state *plane_state,
   3463 			bool visible)
   3464 {
   3465 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   3466 
   3467 	plane_state->uapi.visible = visible;
   3468 
   3469 	if (visible)
   3470 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
   3471 	else
   3472 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
   3473 }
   3474 
   3475 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
   3476 {
   3477 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   3478 	struct drm_plane *plane;
   3479 
   3480 	/*
   3481 	 * Active_planes aliases if multiple "primary" or cursor planes
   3482 	 * have been used on the same (or wrong) pipe. plane_mask uses
   3483 	 * unique ids, hence we can use that to reconstruct active_planes.
   3484 	 */
   3485 	crtc_state->active_planes = 0;
   3486 
   3487 	drm_for_each_plane_mask(plane, &dev_priv->drm,
   3488 				crtc_state->uapi.plane_mask)
   3489 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
   3490 }
   3491 
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * fixing up the current software state to match. Used from boot-time
 * takeover paths, e.g. when the BIOS framebuffer could not be
 * reconstructed (see intel_find_initial_plane_obj()).
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	/* Mark the plane invisible and drop its bookkeeping contributions. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is coupled to the primary plane; turn it off along with it. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
   3535 
   3536 static struct intel_frontbuffer *
   3537 to_intel_frontbuffer(struct drm_framebuffer *fb)
   3538 {
   3539 	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
   3540 }
   3541 
/*
 * Try to take over the framebuffer the BIOS left enabled on this CRTC's
 * primary plane: either wrap the preallocated stolen memory described by
 * @plane_config, or share an fb another active CRTC is already scanning
 * out from the same GGTT address. If neither works, the primary plane is
 * disabled so software state and hardware stay consistent.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	/* Nothing enabled by the BIOS on this pipe. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same scanout address => same BIOS fb; take a reference. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Pin the fb so the hardware can keep scanning out from it. */
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Full-fb, unscaled, untranslated plane window (16.16 src coords). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
   3647 
   3648 static int skl_max_plane_width(const struct drm_framebuffer *fb,
   3649 			       int color_plane,
   3650 			       unsigned int rotation)
   3651 {
   3652 	int cpp = fb->format->cpp[color_plane];
   3653 
   3654 	switch (fb->modifier) {
   3655 	case DRM_FORMAT_MOD_LINEAR:
   3656 	case I915_FORMAT_MOD_X_TILED:
   3657 		/*
   3658 		 * Validated limit is 4k, but has 5k should
   3659 		 * work apart from the following features:
   3660 		 * - Ytile (already limited to 4k)
   3661 		 * - FP16 (already limited to 4k)
   3662 		 * - render compression (already limited to 4k)
   3663 		 * - KVMR sprite and cursor (don't care)
   3664 		 * - horizontal panning (TODO verify this)
   3665 		 * - pipe and plane scaling (TODO verify this)
   3666 		 */
   3667 		if (cpp == 8)
   3668 			return 4096;
   3669 		else
   3670 			return 5120;
   3671 	case I915_FORMAT_MOD_Y_TILED_CCS:
   3672 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   3673 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   3674 		/* FIXME AUX plane? */
   3675 	case I915_FORMAT_MOD_Y_TILED:
   3676 	case I915_FORMAT_MOD_Yf_TILED:
   3677 		if (cpp == 8)
   3678 			return 2048;
   3679 		else
   3680 			return 4096;
   3681 	default:
   3682 		MISSING_CASE(fb->modifier);
   3683 		return 2048;
   3684 	}
   3685 }
   3686 
   3687 static int glk_max_plane_width(const struct drm_framebuffer *fb,
   3688 			       int color_plane,
   3689 			       unsigned int rotation)
   3690 {
   3691 	int cpp = fb->format->cpp[color_plane];
   3692 
   3693 	switch (fb->modifier) {
   3694 	case DRM_FORMAT_MOD_LINEAR:
   3695 	case I915_FORMAT_MOD_X_TILED:
   3696 		if (cpp == 8)
   3697 			return 4096;
   3698 		else
   3699 			return 5120;
   3700 	case I915_FORMAT_MOD_Y_TILED_CCS:
   3701 	case I915_FORMAT_MOD_Yf_TILED_CCS:
   3702 		/* FIXME AUX plane? */
   3703 	case I915_FORMAT_MOD_Y_TILED:
   3704 	case I915_FORMAT_MOD_Yf_TILED:
   3705 		if (cpp == 8)
   3706 			return 2048;
   3707 		else
   3708 			return 5120;
   3709 	default:
   3710 		MISSING_CASE(fb->modifier);
   3711 		return 2048;
   3712 	}
   3713 }
   3714 
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	/*
	 * A single 5k limit applies regardless of fb layout or rotation
	 * on gen11+ (see the INTEL_GEN(dev_priv) >= 11 callers).
	 */
	enum { ICL_PLANE_MAX_WIDTH = 5120 };

	return ICL_PLANE_MAX_WIDTH;
}
   3721 
/* Maximum plane source height in lines on pre-icl (skl-class) hardware. */
static int skl_max_plane_height(void)
{
	enum { SKL_PLANE_MAX_HEIGHT = 4096 };

	return SKL_PLANE_MAX_HEIGHT;
}
   3726 
/* Maximum plane source height in lines on gen11+ hardware. */
static int icl_max_plane_height(void)
{
	enum { ICL_PLANE_MAX_HEIGHT = 4320 };

	return ICL_PLANE_MAX_HEIGHT;
}
   3731 
/*
 * Check whether the main surface coordinates (@main_x, @main_y at
 * @main_offset) can be matched by the CCS AUX surface of @ccs_plane.
 * The AUX offset is walked backwards one alignment step at a time,
 * recomputing the (subsampled) AUX x/y, until the coordinates line up
 * or offset 0 is reached. On success the AUX plane's offset/x/y are
 * committed to @plane_state and true is returned.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/* AUX must not start before the main surface, nor below its y. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Step the offset back one alignment unit and redo x/y. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* Scale back up, preserving the sub-sample remainder. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
   3776 
/*
 * Compute the final offset and x/y coordinates of the main (Y/RGB)
 * surface for a skl+ plane: enforce per-platform width/height limits,
 * surface alignment, the X-tile stride restriction, and (for CCS fbs)
 * agreement between main and AUX surface coordinates.
 * Returns 0 on success, -EINVAL if no acceptable layout exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point. */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	/*
	 * NOTE(review): for fbs without an AUX plane this presumably
	 * indexes color_plane[0], whose offset is only finalized at the
	 * bottom of this function — confirm the clamp below is benign
	 * in that case.
	 */
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	if (WARN_ON(alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back until the row fits in the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
   3880 
/*
 * Compute the offset and x/y coordinates for the UV (chroma) plane of a
 * planar YUV fb. The src rectangle is 16.16 fixed point; shifting by 17
 * converts to integer pixels at the 2x subsampled chroma resolution.
 * Returns 0 on success, -EINVAL if the chroma source size exceeds the
 * plane limits or no CCS-compatible offset can be found.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	/* Mirror the main-surface CCS coordinate matching for chroma. */
	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/* The CCS offset is relative and must be non-negative. */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
   3941 
/*
 * Compute the offset and x/y coordinates of every CCS AUX plane of the
 * fb. The AUX coordinates are derived from the main surface src x/y,
 * scaled by the combined main-plane and CCS-plane subsampling factors.
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* 16.16 fixed point -> integer pixels. */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* Only CCS planes are handled here; skip main/UV planes. */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* Total subsampling relative to the fb's pixel grid. */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/* Re-add the sub-sample remainder, in main-plane units. */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
   3984 
   3985 int skl_check_plane_surface(struct intel_plane_state *plane_state)
   3986 {
   3987 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   3988 	int ret;
   3989 	bool needs_aux = false;
   3990 
   3991 	ret = intel_plane_compute_gtt(plane_state);
   3992 	if (ret)
   3993 		return ret;
   3994 
   3995 	if (!plane_state->uapi.visible)
   3996 		return 0;
   3997 
   3998 	/*
   3999 	 * Handle the AUX surface first since the main surface setup depends on
   4000 	 * it.
   4001 	 */
   4002 	if (is_ccs_modifier(fb->modifier)) {
   4003 		needs_aux = true;
   4004 		ret = skl_check_ccs_aux_surface(plane_state);
   4005 		if (ret)
   4006 			return ret;
   4007 	}
   4008 
   4009 	if (intel_format_info_is_yuv_semiplanar(fb->format,
   4010 						fb->modifier)) {
   4011 		needs_aux = true;
   4012 		ret = skl_check_nv12_aux_surface(plane_state);
   4013 		if (ret)
   4014 			return ret;
   4015 	}
   4016 
   4017 	if (!needs_aux) {
   4018 		int i;
   4019 
   4020 		for (i = 1; i < fb->format->num_planes; i++) {
   4021 			plane_state->color_plane[i].offset = ~0xfff;
   4022 			plane_state->color_plane[i].x = 0;
   4023 			plane_state->color_plane[i].y = 0;
   4024 		}
   4025 	}
   4026 
   4027 	ret = skl_check_main_surface(plane_state);
   4028 	if (ret)
   4029 		return ret;
   4030 
   4031 	return 0;
   4032 }
   4033 
   4034 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
   4035 			     const struct intel_plane_state *plane_state,
   4036 			     unsigned int *num, unsigned int *den)
   4037 {
   4038 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4039 	unsigned int cpp = fb->format->cpp[0];
   4040 
   4041 	/*
   4042 	 * g4x bspec says 64bpp pixel rate can't exceed 80%
   4043 	 * of cdclk when the sprite plane is enabled on the
   4044 	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
   4045 	 * never allowed to exceed 80% of cdclk. Let's just go
   4046 	 * with the ilk/snb limit always.
   4047 	 */
   4048 	if (cpp == 8) {
   4049 		*num = 10;
   4050 		*den = 8;
   4051 	} else {
   4052 		*num = 1;
   4053 		*den = 1;
   4054 	}
   4055 }
   4056 
   4057 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
   4058 				const struct intel_plane_state *plane_state)
   4059 {
   4060 	unsigned int pixel_rate;
   4061 	unsigned int num, den;
   4062 
   4063 	/*
   4064 	 * Note that crtc_state->pixel_rate accounts for both
   4065 	 * horizontal and vertical panel fitter downscaling factors.
   4066 	 * Pre-HSW bspec tells us to only consider the horizontal
   4067 	 * downscaling factor here. We ignore that and just consider
   4068 	 * both for simplicity.
   4069 	 */
   4070 	pixel_rate = crtc_state->pixel_rate;
   4071 
   4072 	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
   4073 
   4074 	/* two pixels per clock with double wide pipe */
   4075 	if (crtc_state->double_wide)
   4076 		den *= 2;
   4077 
   4078 	return DIV_ROUND_UP(pixel_rate * num, den);
   4079 }
   4080 
   4081 unsigned int
   4082 i9xx_plane_max_stride(struct intel_plane *plane,
   4083 		      u32 pixel_format, u64 modifier,
   4084 		      unsigned int rotation)
   4085 {
   4086 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   4087 
   4088 	if (!HAS_GMCH(dev_priv)) {
   4089 		return 32*1024;
   4090 	} else if (INTEL_GEN(dev_priv) >= 4) {
   4091 		if (modifier == I915_FORMAT_MOD_X_TILED)
   4092 			return 16*1024;
   4093 		else
   4094 			return 32*1024;
   4095 	} else if (INTEL_GEN(dev_priv) >= 3) {
   4096 		if (modifier == I915_FORMAT_MOD_X_TILED)
   4097 			return 8*1024;
   4098 		else
   4099 			return 16*1024;
   4100 	} else {
   4101 		if (plane->i9xx_plane == PLANE_C)
   4102 			return 4*1024;
   4103 		else
   4104 			return 8*1024;
   4105 	}
   4106 }
   4107 
   4108 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
   4109 {
   4110 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4111 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4112 	u32 dspcntr = 0;
   4113 
   4114 	if (crtc_state->gamma_enable)
   4115 		dspcntr |= DISPPLANE_GAMMA_ENABLE;
   4116 
   4117 	if (crtc_state->csc_enable)
   4118 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
   4119 
   4120 	if (INTEL_GEN(dev_priv) < 5)
   4121 		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
   4122 
   4123 	return dspcntr;
   4124 }
   4125 
/*
 * Compute the plane-dependent bits of the DSPCNTR value for a pre-skl
 * primary plane: enable bit, pixel format, tiling and rotation/mirror.
 * CRTC-dependent bits are added separately by i9xx_plane_ctl_crtc().
 * Returns 0 (after logging a MISSING_CASE) for unsupported formats.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc to the hardware format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
   4198 
/*
 * Compute the final surface offset and x/y coordinates for a pre-skl
 * primary plane. Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point. */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	/* Undocumented hardware limit on i965/g4x/vlv/chv */
	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
		return -EINVAL;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* gen2/3 have no surface offset register; scan out from offset 0. */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->hw.rotation;
		/* note: these shadow the src_w computed above */
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Rotation/mirroring scans from the far corner/edge. */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
   4258 
   4259 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
   4260 {
   4261 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   4262 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
   4263 
   4264 	if (IS_CHERRYVIEW(dev_priv))
   4265 		return i9xx_plane == PLANE_B;
   4266 	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
   4267 		return false;
   4268 	else if (IS_GEN(dev_priv, 4))
   4269 		return i9xx_plane == PLANE_C;
   4270 	else
   4271 		return i9xx_plane == PLANE_B ||
   4272 			i9xx_plane == PLANE_C;
   4273 }
   4274 
/*
 * Atomic check for a pre-skl primary plane: validate the requested
 * rotation, clip the plane against the CRTC (no scaling supported),
 * compute the surface layout and finally the DSPCNTR value.
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* An invisible plane needs no further programming computed. */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
   4310 
/*
 * Program a pre-skl primary plane from the precomputed plane/crtc state.
 * All register writes happen under uncore.lock with the _FW accessors;
 * DSPCNTR is written just before the surface register since the surface
 * write is what latches the update (see comment below).
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no surface offset register; use the linear offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	/* hsw/bdw take x/y via DSPOFFSET; gen4+ via linear/tile offsets. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   4381 
/*
 * i9xx_disable_plane - turn off a pre-SKL primary plane.
 *
 * DSPCNTR is rewritten (rather than simply zeroed) so the pipe
 * gamma/csc bits it carries stay programmed; see the comment below.
 * All register writes happen under the uncore lock using the _FW
 * accessors.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	/* gen4+ uses DSPSURF, older parts use DSPADDR */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   4412 
/*
 * i9xx_plane_get_hw_state - read back primary plane state from hardware.
 *
 * Returns whether the plane is enabled (DISPLAY_PLANE_ENABLE in DSPCNTR)
 * and stores the pipe the plane is currently assigned to in @pipe.
 * Returns false without touching @pipe if the pipe's power domain is
 * not enabled (the register would not be readable).
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* gen5+ planes are fixed to their pipe; older parts select it in DSPCNTR */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   4447 
/*
 * skl_detach_scaler - unbind one pipe scaler.
 *
 * Zeroes the scaler's control, window position and window size
 * registers for scaler @id on @intel_crtc's pipe.
 */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
   4457 
   4458 /*
   4459  * This function detaches (aka. unbinds) unused scalers in hardware
   4460  */
   4461 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
   4462 {
   4463 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4464 	const struct intel_crtc_scaler_state *scaler_state =
   4465 		&crtc_state->scaler_state;
   4466 	int i;
   4467 
   4468 	/* loop through and disable scalers that aren't in use */
   4469 	for (i = 0; i < intel_crtc->num_scalers; i++) {
   4470 		if (!scaler_state->scalers[i].in_use)
   4471 			skl_detach_scaler(intel_crtc, i);
   4472 	}
   4473 }
   4474 
/*
 * Return the unit the hardware expects the plane stride in, expressed
 * in bytes: 64-byte chunks for linear surfaces, or one tile measured
 * along the stride direction (tile height when rotated 90/270 degrees,
 * tile width in bytes otherwise) for tiled surfaces.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
   4489 
   4490 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
   4491 		     int color_plane)
   4492 {
   4493 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4494 	unsigned int rotation = plane_state->hw.rotation;
   4495 	u32 stride = plane_state->color_plane[color_plane].stride;
   4496 
   4497 	if (color_plane >= fb->format->num_planes)
   4498 		return 0;
   4499 
   4500 	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
   4501 }
   4502 
/*
 * skl_plane_ctl_format - translate a DRM fourcc into PLANE_CTL bits.
 *
 * Maps the pixel format to the PLANE_CTL source pixel format field,
 * including the RGBX channel-order bit where needed.  Unhandled
 * formats trigger MISSING_CASE() and yield 0.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
   4562 
   4563 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
   4564 {
   4565 	if (!plane_state->hw.fb->format->has_alpha)
   4566 		return PLANE_CTL_ALPHA_DISABLE;
   4567 
   4568 	switch (plane_state->hw.pixel_blend_mode) {
   4569 	case DRM_MODE_BLEND_PIXEL_NONE:
   4570 		return PLANE_CTL_ALPHA_DISABLE;
   4571 	case DRM_MODE_BLEND_PREMULTI:
   4572 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
   4573 	case DRM_MODE_BLEND_COVERAGE:
   4574 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
   4575 	default:
   4576 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
   4577 		return PLANE_CTL_ALPHA_DISABLE;
   4578 	}
   4579 }
   4580 
   4581 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
   4582 {
   4583 	if (!plane_state->hw.fb->format->has_alpha)
   4584 		return PLANE_COLOR_ALPHA_DISABLE;
   4585 
   4586 	switch (plane_state->hw.pixel_blend_mode) {
   4587 	case DRM_MODE_BLEND_PIXEL_NONE:
   4588 		return PLANE_COLOR_ALPHA_DISABLE;
   4589 	case DRM_MODE_BLEND_PREMULTI:
   4590 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
   4591 	case DRM_MODE_BLEND_COVERAGE:
   4592 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
   4593 	default:
   4594 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
   4595 		return PLANE_COLOR_ALPHA_DISABLE;
   4596 	}
   4597 }
   4598 
/*
 * skl_plane_ctl_tiling - translate an fb modifier into PLANE_CTL bits.
 *
 * Maps the framebuffer tiling modifier to the PLANE_CTL tiling field,
 * plus the render/media decompression enable bits for the CCS
 * modifiers.  Linear gets 0; unknown modifiers hit MISSING_CASE()
 * and also yield 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		/* gen12 render compression, with clear color explicitly disabled */
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
   4626 
/*
 * skl_plane_ctl_rotate - translate DRM rotation into PLANE_CTL bits.
 *
 * Unhandled values hit MISSING_CASE() and yield 0 (no rotation).
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
   4648 
/*
 * cnl_plane_ctl_flip - translate DRM reflection into PLANE_CTL bits.
 *
 * Only horizontal reflection maps to a hardware bit; Y reflection is
 * deliberately routed into MISSING_CASE() along with any other value.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
		/* fall through - not handled, flagged below */
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
   4663 
   4664 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
   4665 {
   4666 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4667 	u32 plane_ctl = 0;
   4668 
   4669 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
   4670 		return plane_ctl;
   4671 
   4672 	if (crtc_state->gamma_enable)
   4673 		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
   4674 
   4675 	if (crtc_state->csc_enable)
   4676 		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
   4677 
   4678 	return plane_ctl;
   4679 }
   4680 
/*
 * skl_plane_ctl - build the plane-derived portion of PLANE_CTL.
 *
 * Always sets the enable bit plus the format, tiling, rotation and
 * colorkey fields.  On pre-GLK/pre-gen10 hardware the alpha mode,
 * plane gamma disable and YUV->RGB csc configuration also live in this
 * register (on newer hardware they are handled via PLANE_COLOR_CTL,
 * see glk_plane_color_ctl()); gen10+ additionally gets the horizontal
 * flip bit.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
   4719 
   4720 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
   4721 {
   4722 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4723 	u32 plane_color_ctl = 0;
   4724 
   4725 	if (INTEL_GEN(dev_priv) >= 11)
   4726 		return plane_color_ctl;
   4727 
   4728 	if (crtc_state->gamma_enable)
   4729 		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
   4730 
   4731 	if (crtc_state->csc_enable)
   4732 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
   4733 
   4734 	return plane_color_ctl;
   4735 }
   4736 
/*
 * glk_plane_color_ctl - build the plane-derived portion of PLANE_COLOR_CTL.
 *
 * Disables the per-plane gamma, picks the alpha blend mode, and for
 * YUV framebuffers selects the YUV->RGB conversion: non-HDR planes use
 * one of the fixed csc modes (BT.709 or BT.601, with optional range
 * correction disable for full-range content), while ICL HDR-capable
 * planes enable the programmable input csc instead.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
   4763 
/*
 * __intel_display_resume - bring the display back after a GPU reset.
 *
 * Re-reads the current hardware state, then recommits @state (the
 * atomic state duplicated before the reset).  A NULL @state just
 * resyncs software state with the hardware and returns 0.
 *
 * Returns 0 or a negative error from the atomic commit; -EDEADLK is
 * not expected here (the caller holds all modeset locks), hence the
 * WARN_ON.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
   4802 
   4803 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
   4804 {
   4805 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
   4806 		intel_has_gpu_reset(&dev_priv->gt));
   4807 }
   4808 
/*
 * intel_prepare_reset - quiesce the display before a GPU reset.
 *
 * No-op unless the reset clobbers the display (or the modeset reset
 * test modparam is set).  Otherwise takes all modeset locks, stashes
 * the current atomic state in dev_priv->modeset_restore_state, and
 * disables all crtcs.
 *
 * NOTE: the modeset locks and the I915_RESET_MODESET bit deliberately
 * remain held/set on return - including the error paths - and are
 * released by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
   4865 
/*
 * intel_finish_reset - restore the display after a GPU reset.
 *
 * Counterpart to intel_prepare_reset(): recommits the atomic state
 * saved there and drops the modeset locks / I915_RESET_MODESET bit
 * that were left held.  If the reset did not actually clobber the
 * display (force-test mode) only the saved state is recommitted;
 * otherwise the display hardware is fully re-initialized first.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
   4916 
/*
 * icl_set_pipe_chicken - apply per-pipe PIPE_CHICKEN workaround bits
 * (read-modify-write; the individual workarounds are documented below).
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
   4940 
/*
 * icl_enable_trans_port_sync - enable transcoder port sync on a slave.
 *
 * Programs TRANS_DDI_FUNC_CTL2 with the master transcoder select and
 * the sync enable bit.  No-op for crtcs that are not slaves
 * (master_transcoder == INVALID_TRANSCODER).
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* The EDP transcoder is encoded as 0; the others as transcoder + 1. */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Tranascoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
   4970 
/*
 * intel_fdi_normal_train - switch FDI from training pattern to normal.
 *
 * After link training completes, puts both the FDI transmitter and
 * receiver into "normal train" (pixel data) mode and enables enhanced
 * framing on both sides.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
   5011 
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase training: pattern 1 until the receiver reports bit lock,
 * then pattern 2 until symbol lock, polling FDI_RX_IIR up to 5 times
 * per phase.  Training failures are logged but not propagated to the
 * caller.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock to complete training phase 1. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to complete training phase 2. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
   5105 
/*
 * FDI TX voltage swing / pre-emphasis combinations, tried in this
 * order by the SNB/IVB link training loops below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
   5112 
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like ilk_fdi_link_train() (pattern 1 until bit lock, pattern 2 until
 * symbol lock) but each phase additionally walks the
 * snb_b_fdi_train_param[] voltage swing / pre-emphasis table, with up
 * to 5 lock polls per setting.  Failures are logged, not propagated.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 1: walk the vswing/pre-emphasis table until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 2: walk the vswing/pre-emphasis table until symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
   5245 
/*
 * Manual link training for Ivy Bridge A0 parts, which cannot use FDI
 * auto-training.  Walks the vswing/pre-emphasis table (each entry tried
 * twice) until the PCH FDI receiver reports bit lock (train 1) and then
 * symbol lock (train 2).  If every setting fails, falls through to the
 * "done" message having logged the per-setting failures.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2]; /* j/2: each entry tried twice */
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock on the receiver. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				/* Write back the lock bit to clear it (W1C). */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll up to 4 times for symbol lock. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
   5365 
/*
 * Enable the FDI PLLs for the pipe in @crtc_state: first the PCH FDI RX
 * PLL (programming the lane count and mirroring the pipe's BPC), then
 * switch the receiver from the raw clock to PCDclk, and finally make
 * sure the CPU FDI TX PLL is running (always on for Ironlake).
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Copy PIPECONF's BPC field (bits 7:5) into FDI RX bits 18:16. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
   5402 
/*
 * Counterpart to ilk_fdi_pll_enable(): switch the PCH FDI receiver back
 * to the raw clock, then disable the CPU FDI TX PLL and the PCH FDI RX
 * PLL, waiting for the clocks to settle after each step.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
   5432 
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for @crtc, then
 * put both sides back into training pattern 1 (preserving the pipe's
 * BPC setting in the RX control) so a later re-enable starts from a
 * known state.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* Keep the RX BPC field in sync with PIPECONF (bits 7:5 -> 18:16). */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT uses a different train-pattern field layout than IBX. */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
   5483 
/*
 * Check whether any CRTC still has an atomic commit whose cleanup phase
 * has not completed (i.e. framebuffers may still be pinned).  On the
 * first such CRTC found, wait one vblank there to let the cleanup make
 * progress and return true; return false when every CRTC is clean.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* Peek at the oldest commit on this CRTC, if any. */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		/* An empty commit list counts as "cleanup done". */
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}
   5508 
/*
 * Gate the iCLKIP pixel clock and disable the SSC modulator behind it
 * via the sideband (SBI) interface.  Takes sb_lock internally, so the
 * caller must not already hold it.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
   5523 
/*
 * Program iCLKIP clock to the desired frequency (the pipe's pixel clock
 * from @crtc_state).  Disables iCLKIP first, computes the divider and
 * phase-increment settings, programs them over the sideband interface,
 * re-enables the modulator, and finally ungates the pixel clock.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
   5602 
/*
 * Read back the currently programmed iCLKIP frequency, in kHz, by
 * inverting the divisor math of lpt_program_iclkip().  Returns 0 when
 * the pixel clock is gated or the SSC modulator is disabled (i.e.
 * iCLKIP is not in use).
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reconstruct the divisor that lpt_program_iclkip() derived. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
   5639 
/*
 * Copy the CPU transcoder's horizontal and vertical timing registers to
 * @pch_transcoder so both ends of the FDI link agree on the mode timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
   5663 
/*
 * Set or clear the FDI B/C lane bifurcation chicken bit.  No-op if the
 * bit already matches @enable.  The bit may only be flipped while both
 * the B and C FDI receivers are disabled, which the WARN_ONs assert.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
   5683 
   5684 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
   5685 {
   5686 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5687 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5688 
   5689 	switch (crtc->pipe) {
   5690 	case PIPE_A:
   5691 		break;
   5692 	case PIPE_B:
   5693 		if (crtc_state->fdi_lanes > 2)
   5694 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
   5695 		else
   5696 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
   5697 
   5698 		break;
   5699 	case PIPE_C:
   5700 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
   5701 
   5702 		break;
   5703 	default:
   5704 		BUG();
   5705 	}
   5706 }
   5707 
   5708 /*
   5709  * Finds the encoder associated with the given CRTC. This can only be
   5710  * used when we know that the CRTC isn't feeding multiple encoders!
   5711  */
   5712 static struct intel_encoder *
   5713 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
   5714 			   const struct intel_crtc_state *crtc_state)
   5715 {
   5716 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5717 	const struct drm_connector_state *connector_state;
   5718 	const struct drm_connector *connector;
   5719 	struct intel_encoder *encoder = NULL;
   5720 	int num_encoders = 0;
   5721 	int i;
   5722 
   5723 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
   5724 		if (connector_state->crtc != &crtc->base)
   5725 			continue;
   5726 
   5727 		encoder = to_intel_encoder(connector_state->best_encoder);
   5728 		num_encoders++;
   5729 	}
   5730 
   5731 	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
   5732 	     num_encoders, pipe_name(crtc->pipe));
   5733 
   5734 	return encoder;
   5735 }
   5736 
   5737 /*
   5738  * Enable PCH resources required for PCH ports:
   5739  *   - PCH PLLs
   5740  *   - FDI training & RX/TX
   5741  *   - update transcoder timings
   5742  *   - DP transcoding bits
   5743  *   - transcoder
   5744  */
   5745 static void ilk_pch_enable(const struct intel_atomic_state *state,
   5746 			   const struct intel_crtc_state *crtc_state)
   5747 {
   5748 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5749 	struct drm_device *dev = crtc->base.dev;
   5750 	struct drm_i915_private *dev_priv = to_i915(dev);
   5751 	enum pipe pipe = crtc->pipe;
   5752 	u32 temp;
   5753 
   5754 	assert_pch_transcoder_disabled(dev_priv, pipe);
   5755 
   5756 	if (IS_IVYBRIDGE(dev_priv))
   5757 		ivb_update_fdi_bc_bifurcation(crtc_state);
   5758 
   5759 	/* Write the TU size bits before fdi link training, so that error
   5760 	 * detection works. */
   5761 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
   5762 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
   5763 
   5764 	/* For PCH output, training FDI link */
   5765 	dev_priv->display.fdi_link_train(crtc, crtc_state);
   5766 
   5767 	/* We need to program the right clock selection before writing the pixel
   5768 	 * mutliplier into the DPLL. */
   5769 	if (HAS_PCH_CPT(dev_priv)) {
   5770 		u32 sel;
   5771 
   5772 		temp = I915_READ(PCH_DPLL_SEL);
   5773 		temp |= TRANS_DPLL_ENABLE(pipe);
   5774 		sel = TRANS_DPLLB_SEL(pipe);
   5775 		if (crtc_state->shared_dpll ==
   5776 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
   5777 			temp |= sel;
   5778 		else
   5779 			temp &= ~sel;
   5780 		I915_WRITE(PCH_DPLL_SEL, temp);
   5781 	}
   5782 
   5783 	/* XXX: pch pll's can be enabled any time before we enable the PCH
   5784 	 * transcoder, and we actually should do this to not upset any PCH
   5785 	 * transcoder that already use the clock when we share it.
   5786 	 *
   5787 	 * Note that enable_shared_dpll tries to do the right thing, but
   5788 	 * get_shared_dpll unconditionally resets the pll - we need that to have
   5789 	 * the right LVDS enable sequence. */
   5790 	intel_enable_shared_dpll(crtc_state);
   5791 
   5792 	/* set transcoder timing, panel must allow it */
   5793 	assert_panel_unlocked(dev_priv, pipe);
   5794 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
   5795 
   5796 	intel_fdi_normal_train(crtc);
   5797 
   5798 	/* For PCH DP, enable TRANS_DP_CTL */
   5799 	if (HAS_PCH_CPT(dev_priv) &&
   5800 	    intel_crtc_has_dp_encoder(crtc_state)) {
   5801 		const struct drm_display_mode *adjusted_mode =
   5802 			&crtc_state->hw.adjusted_mode;
   5803 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
   5804 		i915_reg_t reg = TRANS_DP_CTL(pipe);
   5805 		enum port port;
   5806 
   5807 		temp = I915_READ(reg);
   5808 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
   5809 			  TRANS_DP_SYNC_MASK |
   5810 			  TRANS_DP_BPC_MASK);
   5811 		temp |= TRANS_DP_OUTPUT_ENABLE;
   5812 		temp |= bpc << 9; /* same format but at 11:9 */
   5813 
   5814 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
   5815 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
   5816 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
   5817 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
   5818 
   5819 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
   5820 		WARN_ON(port < PORT_B || port > PORT_D);
   5821 		temp |= TRANS_DP_PORT_SEL(port);
   5822 
   5823 		I915_WRITE(reg, temp);
   5824 	}
   5825 
   5826 	ilk_enable_pch_transcoder(crtc_state);
   5827 }
   5828 
/*
 * LPT counterpart of ilk_pch_enable(): program the iCLKIP clock, copy
 * the mode timings to PCH transcoder A (the only one on LPT), and
 * enable the transcoder.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
   5845 
/*
 * Sanity-check a mode set on CPT by watching the pipe's scanline
 * counter (PIPEDSL) advance.  Retries once before declaring the pipe
 * stuck; failure is only reported, not recovered from.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
   5859 
   5860 /*
   5861  * The hardware phase 0.0 refers to the center of the pixel.
   5862  * We want to start from the top/left edge which is phase
   5863  * -0.5. That matches how the hardware calculates the scaling
   5864  * factors (from top-left of the first pixel to bottom-right
   5865  * of the last pixel, as opposed to the pixel centers).
   5866  *
   5867  * For 4:2:0 subsampled chroma planes we obviously have to
   5868  * adjust that so that the chroma sample position lands in
   5869  * the right spot.
   5870  *
   5871  * Note that for packed YCbCr 4:2:2 formats there is no way to
   5872  * control chroma siting. The hardware simply replicates the
   5873  * chroma samples for both of the luma samples, and thus we don't
   5874  * actually get the expected MPEG2 chroma siting convention :(
   5875  * The same behaviour is observed on pre-SKL platforms as well.
   5876  *
   5877  * Theory behind the formula (note that we ignore sub-pixel
   5878  * source coordinates):
   5879  * s = source sample position
   5880  * d = destination sample position
   5881  *
   5882  * Downscaling 4:1:
   5883  * -0.5
   5884  * | 0.0
   5885  * | |     1.5 (initial phase)
   5886  * | |     |
   5887  * v v     v
   5888  * | s | s | s | s |
   5889  * |       d       |
   5890  *
   5891  * Upscaling 1:4:
   5892  * -0.5
   5893  * | -0.375 (initial phase)
   5894  * | |     0.0
   5895  * | |     |
   5896  * v v     v
   5897  * |       s       |
   5898  * | d | d | d | d |
   5899  */
   5900 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
   5901 {
   5902 	int phase = -0x8000;
   5903 	u16 trip = 0;
   5904 
   5905 	if (chroma_cosited)
   5906 		phase += (sub - 1) * 0x8000 / sub;
   5907 
   5908 	phase += scale / (2 * sub);
   5909 
   5910 	/*
   5911 	 * Hardware initial phase limited to [-0.5:1.5].
   5912 	 * Since the max hardware scale factor is 3.0, we
   5913 	 * should never actually excdeed 1.0 here.
   5914 	 */
   5915 	WARN_ON(phase < -0x8000 || phase > 0x18000);
   5916 
   5917 	if (phase < 0)
   5918 		phase = 0x10000 + phase;
   5919 	else
   5920 		trip = PS_PHASE_TRIP;
   5921 
   5922 	return ((phase >> 2) & PS_PHASE_MASK) | trip;
   5923 }
   5924 
/* Pipe/plane scaler source and destination size limits, in pixels. */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
/* Gen11 (ICL) raises the maximum width to 5120. */
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
/* Semi-planar YUV 4:2:0 sources need at least 16x16. */
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
   5939 
/*
 * Stage a scaler request (or release) in @crtc_state for @scaler_user.
 *
 * When scaling is needed — the src and dst sizes differ, or the caller
 * passed @need_scaler already set — and the sizes are within the
 * platform limits, the user is marked in scaler_state->scaler_users.
 * With @force_detach, or when no scaling is needed, any previously
 * staged scaler is freed instead.  Actual register programming happens
 * later, during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL when the request cannot be satisfied.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have a larger minimum size. */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
   6029 
   6030 /**
   6031  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
   6032  *
   6033  * @state: crtc's scaler state
   6034  *
   6035  * Return
   6036  *     0 - scaler_usage updated successfully
   6037  *    error - requested scaling cannot be supported or other error condition
   6038  */
   6039 int skl_update_scaler_crtc(struct intel_crtc_state *state)
   6040 {
   6041 	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
   6042 	bool need_scaler = false;
   6043 
   6044 	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
   6045 		need_scaler = true;
   6046 
   6047 	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
   6048 				 &state->scaler_state.scaler_id,
   6049 				 state->pipe_src_w, state->pipe_src_h,
   6050 				 adjusted_mode->crtc_hdisplay,
   6051 				 adjusted_mode->crtc_vdisplay, NULL, 0,
   6052 				 need_scaler);
   6053 }
   6054 
   6055 /**
   6056  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
   6057  * @crtc_state: crtc's scaler state
   6058  * @plane_state: atomic plane state to update
   6059  *
   6060  * Return
   6061  *     0 - scaler_usage updated successfully
   6062  *    error - requested scaling cannot be supported or other error condition
   6063  */
   6064 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
   6065 				   struct intel_plane_state *plane_state)
   6066 {
   6067 	struct intel_plane *intel_plane =
   6068 		to_intel_plane(plane_state->uapi.plane);
   6069 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
   6070 	struct drm_framebuffer *fb = plane_state->hw.fb;
   6071 	int ret;
   6072 	bool force_detach = !fb || !plane_state->uapi.visible;
   6073 	bool need_scaler = false;
   6074 
   6075 	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
   6076 	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
   6077 	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
   6078 		need_scaler = true;
   6079 
   6080 	ret = skl_update_scaler(crtc_state, force_detach,
   6081 				drm_plane_index(&intel_plane->base),
   6082 				&plane_state->scaler_id,
   6083 				drm_rect_width(&plane_state->uapi.src) >> 16,
   6084 				drm_rect_height(&plane_state->uapi.src) >> 16,
   6085 				drm_rect_width(&plane_state->uapi.dst),
   6086 				drm_rect_height(&plane_state->uapi.dst),
   6087 				fb ? fb->format : NULL,
   6088 				fb ? fb->modifier : 0,
   6089 				need_scaler);
   6090 
   6091 	if (ret || plane_state->scaler_id < 0)
   6092 		return ret;
   6093 
   6094 	/* check colorkey */
   6095 	if (plane_state->ckey.flags) {
   6096 		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
   6097 			      intel_plane->base.base.id,
   6098 			      intel_plane->base.name);
   6099 		return -EINVAL;
   6100 	}
   6101 
   6102 	/* Check src format */
   6103 	switch (fb->format->format) {
   6104 	case DRM_FORMAT_RGB565:
   6105 	case DRM_FORMAT_XBGR8888:
   6106 	case DRM_FORMAT_XRGB8888:
   6107 	case DRM_FORMAT_ABGR8888:
   6108 	case DRM_FORMAT_ARGB8888:
   6109 	case DRM_FORMAT_XRGB2101010:
   6110 	case DRM_FORMAT_XBGR2101010:
   6111 	case DRM_FORMAT_ARGB2101010:
   6112 	case DRM_FORMAT_ABGR2101010:
   6113 	case DRM_FORMAT_YUYV:
   6114 	case DRM_FORMAT_YVYU:
   6115 	case DRM_FORMAT_UYVY:
   6116 	case DRM_FORMAT_VYUY:
   6117 	case DRM_FORMAT_NV12:
   6118 	case DRM_FORMAT_P010:
   6119 	case DRM_FORMAT_P012:
   6120 	case DRM_FORMAT_P016:
   6121 	case DRM_FORMAT_Y210:
   6122 	case DRM_FORMAT_Y212:
   6123 	case DRM_FORMAT_Y216:
   6124 	case DRM_FORMAT_XVYU2101010:
   6125 	case DRM_FORMAT_XVYU12_16161616:
   6126 	case DRM_FORMAT_XVYU16161616:
   6127 		break;
   6128 	case DRM_FORMAT_XBGR16161616F:
   6129 	case DRM_FORMAT_ABGR16161616F:
   6130 	case DRM_FORMAT_XRGB16161616F:
   6131 	case DRM_FORMAT_ARGB16161616F:
   6132 		if (INTEL_GEN(dev_priv) >= 11)
   6133 			break;
   6134 		/* fall through */
   6135 	default:
   6136 		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
   6137 			      intel_plane->base.base.id, intel_plane->base.name,
   6138 			      fb->base.id, fb->format->format);
   6139 		return -EINVAL;
   6140 	}
   6141 
   6142 	return 0;
   6143 }
   6144 
   6145 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
   6146 {
   6147 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   6148 	int i;
   6149 
   6150 	for (i = 0; i < crtc->num_scalers; i++)
   6151 		skl_detach_scaler(crtc, i);
   6152 }
   6153 
/*
 * Program a SKL+ pipe scaler (PS) to implement the panel fitter
 * configuration in @crtc_state.  No-op unless pch_pfit is enabled.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* Atomic check must have assigned a scaler for the pfit. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in bits 31:16, height in 15:0. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* Downscale ratios in 16.16 fixed point (source/destination). */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		/*
		 * NOTE(review): the phase registers use I915_WRITE_FW while the
		 * surrounding writes use plain I915_WRITE — confirm the mixed
		 * forcewake handling here is intentional.
		 */
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
   6190 
   6191 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
   6192 {
   6193 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   6194 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6195 	enum pipe pipe = crtc->pipe;
   6196 
   6197 	if (crtc_state->pch_pfit.enabled) {
   6198 		/* Force use of hard-coded filter coefficients
   6199 		 * as some pre-programmed values are broken,
   6200 		 * e.g. x201.
   6201 		 */
   6202 		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
   6203 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
   6204 						 PF_PIPE_SEL_IVB(pipe));
   6205 		else
   6206 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
   6207 		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
   6208 		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
   6209 	}
   6210 }
   6211 
/*
 * Enable IPS (Intermediate Pixel Storage) for @crtc_state.  On Broadwell
 * this goes through the pcode mailbox; elsewhere it is a direct IPS_CTL
 * write followed by a wait for the enable bit to latch.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
   6247 
/*
 * Disable IPS for @crtc_state (pcode mailbox on Broadwell, direct IPS_CTL
 * write otherwise), then wait a vblank so planes can safely be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* Posting read to flush the write before waiting for vblank. */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
   6274 
   6275 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
   6276 {
   6277 	if (intel_crtc->overlay)
   6278 		(void) intel_overlay_switch_off(intel_crtc->overlay);
   6279 
   6280 	/* Let userspace switch the overlay on again. In most cases userspace
   6281 	 * has to recompute where to put it anyway.
   6282 	 */
   6283 }
   6284 
/*
 * Decide whether IPS must be turned off before the plane update for this
 * commit.  Returns false if IPS was not on; true on a full modeset, on the
 * HSW split-gamma LUT workaround, or when the new state no longer wants IPS.
 */
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->ips_enabled)
		return false;

	if (needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Disable IPS before we program the LUT.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	return !new_crtc_state->ips_enabled;
}
   6311 
/*
 * Decide whether IPS must be (re-)enabled after the plane update for this
 * commit.  Mirrors hsw_pre_update_disable_ips(): false if the new state
 * doesn't want IPS; true on modeset, on the HSW split-gamma workaround,
 * on the first fastset after state inheritance, or if IPS was off before.
 */
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!new_crtc_state->ips_enabled)
		return false;

	if (needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	/*
	 * We can't read out IPS on broadwell, assume the worst and
	 * forcibly enable IPS on the first fastset.
	 */
	if (new_crtc_state->update_pipe &&
	    old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
		return true;

	return !old_crtc_state->ips_enabled;
}
   6346 
   6347 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
   6348 {
   6349 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   6350 
   6351 	if (!crtc_state->nv12_planes)
   6352 		return false;
   6353 
   6354 	/* WA Display #0827: Gen9:all */
   6355 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
   6356 		return true;
   6357 
   6358 	return false;
   6359 }
   6360 
   6361 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
   6362 {
   6363 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   6364 
   6365 	/* Wa_2006604312:icl */
   6366 	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
   6367 		return true;
   6368 
   6369 	return false;
   6370 }
   6371 
/*
 * True when this commit goes from no active planes (or a full modeset)
 * to at least one active plane on the CRTC.
 */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
		new_crtc_state->active_planes;
}
   6378 
/*
 * True when this commit goes from at least one active plane to none
 * (or performs a full modeset) on the CRTC.
 */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->active_planes &&
		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
}
   6385 
/*
 * Per-CRTC work that must run after the plane update has been committed:
 * frontbuffer flip notification, post-update watermarks, IPS re-enable,
 * FBC post-update, and tear-down of the NV12/scaler-clock workarounds
 * once they are no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	/* Only set when the primary plane is part of this commit. */
	if (new_primary_state)
		intel_fbc_post_update(crtc);

	/* Disable WA Display #0827 once the NV12 condition has gone away. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Likewise Wa_2006604312 once the scaler is no longer in use. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
   6418 
/*
 * Per-CRTC work that must run before the plane update is committed:
 * IPS disable, FBC pre-update, workaround arming, self-refresh/LP
 * watermark handling (with the vblank waits they require), intermediate
 * watermark programming, and gen2 underrun-report suppression.  The
 * ordering of these steps is significant — see the inline comments.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank wait before planes change. */
	if (new_primary_state &&
	    intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
   6509 
/*
 * Disable all planes on @crtc that are marked in the new state's
 * update_planes mask, switch off the legacy overlay, and flip the
 * frontbuffer bits of every plane that was previously visible.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		/* Skip planes on other pipes or not selected for update. */
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_disable_plane(plane, new_crtc_state);

		/* Only previously-visible planes need a frontbuffer flip. */
		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
   6537 
   6538 /*
   6539  * intel_connector_primary_encoder - get the primary encoder for a connector
   6540  * @connector: connector for which to return the encoder
   6541  *
   6542  * Returns the primary encoder for a connector. There is a 1:1 mapping from
   6543  * all connectors to their encoder, except for DP-MST connectors which have
   6544  * both a virtual and a primary encoder. These DP-MST primary encoders can be
   6545  * pointed to by as many DP-MST connectors as there are pipes.
   6546  */
   6547 static struct intel_encoder *
   6548 intel_connector_primary_encoder(struct intel_connector *connector)
   6549 {
   6550 	struct intel_encoder *encoder;
   6551 
   6552 	if (connector->mst_port)
   6553 		return &dp_to_dig_port(connector->mst_port)->base;
   6554 
   6555 	encoder = intel_attached_encoder(connector);
   6556 	WARN_ON(!encoder);
   6557 
   6558 	return encoder;
   6559 }
   6560 
/*
 * Call the ->update_prepare() hook of the primary encoder of every
 * connector in @state that needs a modeset, before the update is applied.
 */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		/* The connector may be getting disabled: crtc can be NULL. */
		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}
   6586 
/*
 * Call the ->update_complete() hook of the primary encoder of every
 * connector in @state that needed a modeset, after the update has been
 * applied.  Counterpart of intel_encoders_update_prepare().
 */
static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_complete)
			continue;

		/* The connector may be getting disabled: crtc can be NULL. */
		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
	}
}
   6612 
   6613 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
   6614 					  struct intel_crtc *crtc)
   6615 {
   6616 	const struct intel_crtc_state *crtc_state =
   6617 		intel_atomic_get_new_crtc_state(state, crtc);
   6618 	const struct drm_connector_state *conn_state;
   6619 	struct drm_connector *conn;
   6620 	int i;
   6621 
   6622 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6623 		struct intel_encoder *encoder =
   6624 			to_intel_encoder(conn_state->best_encoder);
   6625 
   6626 		if (conn_state->crtc != &crtc->base)
   6627 			continue;
   6628 
   6629 		if (encoder->pre_pll_enable)
   6630 			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
   6631 	}
   6632 }
   6633 
   6634 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
   6635 				      struct intel_crtc *crtc)
   6636 {
   6637 	const struct intel_crtc_state *crtc_state =
   6638 		intel_atomic_get_new_crtc_state(state, crtc);
   6639 	const struct drm_connector_state *conn_state;
   6640 	struct drm_connector *conn;
   6641 	int i;
   6642 
   6643 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6644 		struct intel_encoder *encoder =
   6645 			to_intel_encoder(conn_state->best_encoder);
   6646 
   6647 		if (conn_state->crtc != &crtc->base)
   6648 			continue;
   6649 
   6650 		if (encoder->pre_enable)
   6651 			encoder->pre_enable(encoder, crtc_state, conn_state);
   6652 	}
   6653 }
   6654 
/*
 * Invoke the ->enable() hook of every encoder that feeds @crtc in the new
 * state, then notify the ACPI opregion that each encoder is active.
 */
static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(encoder, crtc_state, conn_state);
		/* Opregion notify happens after the encoder is enabled. */
		intel_opregion_notify_encoder(encoder, true);
	}
}
   6676 
/*
 * Notify the ACPI opregion that each encoder driving @crtc in the old
 * state is going inactive, then invoke its ->disable() hook.  Mirrors
 * intel_encoders_enable() in reverse order.
 */
static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		/* Opregion notify happens before the encoder is disabled. */
		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(encoder, old_crtc_state, old_conn_state);
	}
}
   6698 
   6699 static void intel_encoders_post_disable(struct intel_atomic_state *state,
   6700 					struct intel_crtc *crtc)
   6701 {
   6702 	const struct intel_crtc_state *old_crtc_state =
   6703 		intel_atomic_get_old_crtc_state(state, crtc);
   6704 	const struct drm_connector_state *old_conn_state;
   6705 	struct drm_connector *conn;
   6706 	int i;
   6707 
   6708 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   6709 		struct intel_encoder *encoder =
   6710 			to_intel_encoder(old_conn_state->best_encoder);
   6711 
   6712 		if (old_conn_state->crtc != &crtc->base)
   6713 			continue;
   6714 
   6715 		if (encoder->post_disable)
   6716 			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
   6717 	}
   6718 }
   6719 
/*
 * Invoke the ->post_pll_disable() hook of every encoder that was driving
 * @crtc in the old state, after the PLL has been disabled.
 */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
	}
}
   6740 
   6741 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
   6742 				       struct intel_crtc *crtc)
   6743 {
   6744 	const struct intel_crtc_state *crtc_state =
   6745 		intel_atomic_get_new_crtc_state(state, crtc);
   6746 	const struct drm_connector_state *conn_state;
   6747 	struct drm_connector *conn;
   6748 	int i;
   6749 
   6750 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   6751 		struct intel_encoder *encoder =
   6752 			to_intel_encoder(conn_state->best_encoder);
   6753 
   6754 		if (conn_state->crtc != &crtc->base)
   6755 			continue;
   6756 
   6757 		if (encoder->update_pipe)
   6758 			encoder->update_pipe(encoder, crtc_state, conn_state);
   6759 	}
   6760 }
   6761 
   6762 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
   6763 {
   6764 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   6765 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
   6766 
   6767 	plane->disable_plane(plane, crtc_state);
   6768 }
   6769 
/*
 * Full modeset enable sequence for an Ironlake-style CRTC: underrun
 * suppression, PLL/timing/pipeconf programming, encoder pre-enable, FDI
 * PLL, panel fitter, LUTs, watermarks, pipe enable, PCH enable, encoder
 * enable, and finally the post-enable vblank waits.  The step order is
 * mandated by the hardware enable sequence — do not reorder.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-enable the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
   6861 
   6862 /* IPS only exists on ULT machines and is tied to pipe A. */
   6863 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
   6864 {
   6865 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
   6866 }
   6867 
   6868 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
   6869 					    enum pipe pipe, bool apply)
   6870 {
   6871 	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
   6872 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
   6873 
   6874 	if (apply)
   6875 		val |= mask;
   6876 	else
   6877 		val &= ~mask;
   6878 
   6879 	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
   6880 }
   6881 
/*
 * Program the MBus data box credits for @crtc's pipe.  Gen12+ gets larger
 * B/BW credit values than gen11.  NOTE(review): the specific credit
 * numbers presumably come from the bspec MBUS tables — confirm there.
 */
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = MBUS_DBOX_A_CREDIT(2);

	if (INTEL_GEN(dev_priv) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
   6900 
/*
 * Set the transcoder's frame start delay to 0 (the default) via a
 * read-modify-write of the CHICKEN_TRANS register.
 */
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
	u32 val;

	val = I915_READ(reg);
	val &= ~HSW_FRAME_START_DELAY_MASK;
	val |= HSW_FRAME_START_DELAY(0);
	I915_WRITE(reg, val);
}
   6913 
/*
 * Full modeset enable for HSW+ (DDI) pipes: PLL, encoders, transcoder
 * timings, pipe fitter/LUTs, then the pipe itself and finally the encoders.
 * The ordering below follows the hardware enable sequence and must not be
 * rearranged casually (see the workaround notes at the bottom).
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active CRTC would corrupt our state tracking. */
	if (WARN_ON(crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	/* Not every output uses a shared DPLL (DSI does not, for example). */
	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	/* DSI transcoders get their timings programmed by the encoder hooks. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_enable_trans_port_sync(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	/* PIPE_MULT is 0-based in hardware, hence the "- 1". */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		lpt_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* WA #1180 cleanup: wait a vblank before re-enabling clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
   7021 
   7022 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
   7023 {
   7024 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   7025 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7026 	enum pipe pipe = crtc->pipe;
   7027 
   7028 	/* To avoid upsetting the power well on haswell only disable the pfit if
   7029 	 * it's in use. The hw state code will make sure we get this right. */
   7030 	if (old_crtc_state->pch_pfit.enabled) {
   7031 		I915_WRITE(PF_CTL(pipe), 0);
   7032 		I915_WRITE(PF_WIN_POS(pipe), 0);
   7033 		I915_WRITE(PF_WIN_SZ(pipe), 0);
   7034 	}
   7035 }
   7036 
/*
 * Full modeset disable for ILK-family pipes: encoders first, then the
 * pipe, panel fitter, FDI link and (if present) the PCH transcoder.
 * Teardown order mirrors the enable sequence in reverse.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		/* CPT routes DP/DPLL per transcoder; undo that routing too. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Everything is off now; re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
   7093 
/*
 * Modeset disable for HSW+ (DDI) pipes. Unlike the ILK path, all the
 * actual teardown lives in the encoder disable/post_disable hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
   7104 
/*
 * Program the GMCH (pre-ILK) panel fitter from the precomputed state.
 * No-op when the state carries no fitter configuration.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be written before the control enable bit. */
	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
   7127 
   7128 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
   7129 {
   7130 	if (phy == PHY_NONE)
   7131 		return false;
   7132 
   7133 	if (IS_ELKHARTLAKE(dev_priv))
   7134 		return phy <= PHY_C;
   7135 
   7136 	if (INTEL_GEN(dev_priv) >= 11)
   7137 		return phy <= PHY_B;
   7138 
   7139 	return false;
   7140 }
   7141 
   7142 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
   7143 {
   7144 	if (INTEL_GEN(dev_priv) >= 12)
   7145 		return phy >= PHY_D && phy <= PHY_I;
   7146 
   7147 	if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
   7148 		return phy >= PHY_C && phy <= PHY_F;
   7149 
   7150 	return false;
   7151 }
   7152 
   7153 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
   7154 {
   7155 	if (IS_ELKHARTLAKE(i915) && port == PORT_D)
   7156 		return PHY_A;
   7157 
   7158 	return (enum phy)port;
   7159 }
   7160 
   7161 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
   7162 {
   7163 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
   7164 		return PORT_TC_NONE;
   7165 
   7166 	if (INTEL_GEN(dev_priv) >= 12)
   7167 		return port - PORT_D;
   7168 
   7169 	return port - PORT_C;
   7170 }
   7171 
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * log via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
   7194 
/*
 * Map a digital port's AUX channel to its power domain. Type-C ports in
 * Thunderbolt-alt mode use the dedicated *_TBT domains; everything else
 * uses the plain AUX domains. Unknown channels log via MISSING_CASE and
 * fall back to a sane default.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
   7240 
   7241 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
   7242 {
   7243 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   7244 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7245 	struct drm_encoder *encoder;
   7246 	enum pipe pipe = crtc->pipe;
   7247 	u64 mask;
   7248 	enum transcoder transcoder = crtc_state->cpu_transcoder;
   7249 
   7250 	if (!crtc_state->hw.active)
   7251 		return 0;
   7252 
   7253 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
   7254 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
   7255 	if (crtc_state->pch_pfit.enabled ||
   7256 	    crtc_state->pch_pfit.force_thru)
   7257 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
   7258 
   7259 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
   7260 				  crtc_state->uapi.encoder_mask) {
   7261 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
   7262 
   7263 		mask |= BIT_ULL(intel_encoder->power_domain);
   7264 	}
   7265 
   7266 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
   7267 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
   7268 
   7269 	if (crtc_state->shared_dpll)
   7270 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
   7271 
   7272 	return mask;
   7273 }
   7274 
   7275 static u64
   7276 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
   7277 {
   7278 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   7279 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7280 	enum intel_display_power_domain domain;
   7281 	u64 domains, new_domains, old_domains;
   7282 
   7283 	old_domains = crtc->enabled_power_domains;
   7284 	crtc->enabled_power_domains = new_domains =
   7285 		get_crtc_power_domains(crtc_state);
   7286 
   7287 	domains = new_domains & ~old_domains;
   7288 
   7289 	for_each_power_domain(domain, domains)
   7290 		intel_display_power_get(dev_priv, domain);
   7291 
   7292 	return old_domains & ~new_domains;
   7293 }
   7294 
/*
 * Drop one power reference for every domain bit set in @domains;
 * counterpart to modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
   7303 
/*
 * Full modeset enable for VLV/CHV pipes: pipe config first, then DPLL,
 * encoders, fitter/LUTs, watermarks and finally the pipe and encoders.
 * The ordering follows the hardware enable sequence.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active CRTC would corrupt our state tracking. */
	if (WARN_ON(crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B sprite blending: force legacy mode, black canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
   7358 
/* Program the precomputed FP0/FP1 DPLL divider registers for this pipe. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
   7367 
/*
 * Full modeset enable for gen2-gen4 (non-VLV/CHV) pipes: PLL dividers,
 * timings, pipe config, then PLL, fitter/LUTs, watermarks, pipe and
 * encoders, following the hardware enable sequence.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active CRTC would corrupt our state tracking. */
	if (WARN_ON(crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 has no underrun reporting hardware. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
   7415 
/*
 * Turn off the GMCH panel fitter. Like the enable path, this may only
 * be done while the pipe is disabled.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the fitter was never configured. */
	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
   7430 
/*
 * Full modeset disable for gen2-gen4/VLV/CHV pipes: encoders, pipe,
 * fitter, then the DPLL, with underrun reporting and watermark updates
 * at the end.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps its PLL under encoder control; leave it alone here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* gen2 has no underrun reporting hardware. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
   7477 
/*
 * Forcibly disable a CRTC outside the normal atomic commit flow (used
 * during hw state sanitization at init/resume): disable its planes and
 * the hardware, then scrub all software state - crtc/encoder links,
 * power domain references, cdclk/voltage bookkeeping and bandwidth
 * accounting. All relevant modeset locks must already be held via @ctx.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Planes must go before the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Build a throwaway atomic state just to drive crtc_disable(). */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Scrub the uapi and hw halves of the committed crtc state. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Release every power domain reference this CRTC was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(pipe);
	dev_priv->min_cdclk[pipe] = 0;
	dev_priv->min_voltage_level[pipe] = 0;

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
   7557 
   7558 /*
   7559  * turn all crtc's off, but do not adjust state
   7560  * This has to be paired with a call to intel_modeset_setup_hw_state.
   7561  */
   7562 int intel_display_suspend(struct drm_device *dev)
   7563 {
   7564 	struct drm_i915_private *dev_priv = to_i915(dev);
   7565 	struct drm_atomic_state *state;
   7566 	int ret;
   7567 
   7568 	state = drm_atomic_helper_suspend(dev);
   7569 	ret = PTR_ERR_OR_ZERO(state);
   7570 	if (ret)
   7571 		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
   7572 	else
   7573 		dev_priv->modeset_restore_state = state;
   7574 	return ret;
   7575 }
   7576 
/* Default encoder destroy hook: detach from the core, then free. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
   7584 
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). @crtc_state may be NULL when the connector is not
 * bound to a CRTC; each mismatch is reported via I915_STATE_WARN. */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors share a DDI encoder; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
   7623 
   7624 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
   7625 {
   7626 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
   7627 		return crtc_state->fdi_lanes;
   7628 
   7629 	return 0;
   7630 }
   7631 
/*
 * Validate the requested FDI lane count for @pipe against platform
 * limits and against lanes consumed by the other pipes (FDI lanes are
 * shared on IVB three-pipe parts). Returns 0 on success, -EINVAL on an
 * impossible configuration, or a PTR_ERR from acquiring the other
 * pipe's state (may be -EDEADLK, which the caller must propagate).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B only works if pipe C uses no FDI lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only get lanes that pipe B leaves unused. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
   7703 
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. If the
 * lane check fails, retry with progressively lower pipe bpp (down to
 * 6 bpc). Returns 0 on success, RETRY (1) when the caller must redo the
 * config computation with the reduced bpp, or a negative error.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK means the atomic backoff dance; propagate immediately. */
	if (ret == -EDEADLK)
		return ret;

	/* Drop 2 bits per component (min 6 bpc) and try again. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
   7752 
/*
 * Report whether this CRTC state could use IPS (Intermediate Pixel
 * Storage) at all: hardware support, the enable_ips modparam, bpp limit
 * and (on BDW) the cdclk-derived pixel rate ceiling.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!i915_modparams.enable_ips)
		return false;

	/* IPS cannot handle more than 24bpp. */
	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
   7781 
/*
 * Decide whether IPS should actually be enabled for this state: it must
 * be capable (see hsw_crtc_state_ips_capable), CRC capture must be off,
 * at least one non-cursor plane must be active, and on BDW the pixel
 * rate must fit within 95% of the logical cdclk.
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->uapi.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
   7812 
   7813 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
   7814 {
   7815 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7816 
   7817 	/* GDG double wide on either pipe, otherwise pipe A only */
   7818 	return INTEL_GEN(dev_priv) < 4 &&
   7819 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
   7820 }
   7821 
/*
 * Effective pipe pixel rate on ILK+: the mode's dot clock, scaled up by
 * the source/destination area ratio when the pch panel fitter downscales
 * (the pipe must fetch proportionally faster than it outputs).
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	u32 pixel_rate;

	pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		u64 pipe_w, pipe_h, pfit_w, pfit_h;
		u32 pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		/* pfit size register packs width in the high 16 bits. */
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* Only upscaling of the rate: clamp source to at least pfit. */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* Guard the division below against a zero pfit window. */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
   7856 
   7857 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
   7858 {
   7859 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   7860 
   7861 	if (HAS_GMCH(dev_priv))
   7862 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
   7863 		crtc_state->pixel_rate =
   7864 			crtc_state->hw.adjusted_mode.crtc_clock;
   7865 	else
   7866 		crtc_state->pixel_rate =
   7867 			ilk_pipe_pixel_rate(crtc_state);
   7868 }
   7869 
/*
 * Validate and finalize the generic (non-encoder-specific) parts of a CRTC
 * configuration: dotclock limits (including pre-gen4 double wide mode),
 * the YCbCr-vs-CTM exclusion, even pipe source width restrictions, the
 * hsync front porch workaround, and the resulting pixel rate.
 *
 * Returns 0 on success or -EINVAL if the mode cannot be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: single wide pipes top out at 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders additionally need a valid FDI link configuration. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
   7943 
   7944 static void
   7945 intel_reduce_m_n_ratio(u32 *num, u32 *den)
   7946 {
   7947 	while (*num > DATA_LINK_M_N_MASK ||
   7948 	       *den > DATA_LINK_M_N_MASK) {
   7949 		*num >>= 1;
   7950 		*den >>= 1;
   7951 	}
   7952 }
   7953 
   7954 static void compute_m_n(unsigned int m, unsigned int n,
   7955 			u32 *ret_m, u32 *ret_n,
   7956 			bool constant_n)
   7957 {
   7958 	/*
   7959 	 * Several DP dongles in particular seem to be fussy about
   7960 	 * too large link M/N values. Give N value as 0x8000 that
   7961 	 * should be acceptable by specific devices. 0x8000 is the
   7962 	 * specified fixed N value for asynchronous clock mode,
   7963 	 * which the devices expect also in synchronous clock mode.
   7964 	 */
   7965 	if (constant_n)
   7966 		*ret_n = 0x8000;
   7967 	else
   7968 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
   7969 
   7970 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
   7971 	intel_reduce_m_n_ratio(ret_m, ret_n);
   7972 }
   7973 
   7974 void
   7975 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
   7976 		       int pixel_clock, int link_clock,
   7977 		       struct intel_link_m_n *m_n,
   7978 		       bool constant_n, bool fec_enable)
   7979 {
   7980 	u32 data_clock = bits_per_pixel * pixel_clock;
   7981 
   7982 	if (fec_enable)
   7983 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
   7984 
   7985 	m_n->tu = 64;
   7986 	compute_m_n(data_clock,
   7987 		    link_clock * nlanes * 8,
   7988 		    &m_n->gmch_m, &m_n->gmch_n,
   7989 		    constant_n);
   7990 
   7991 	compute_m_n(pixel_clock, link_clock,
   7992 		    &m_n->link_m, &m_n->link_n,
   7993 		    constant_n);
   7994 }
   7995 
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed into PCH_DREF_CONTROL, preferring the BIOS state.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		/* Override the VBT with the BIOS-programmed state. */
		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
				      enableddisabled(bios_lvds_use_ssc),
				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
   8016 
   8017 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
   8018 {
   8019 	if (i915_modparams.panel_use_ssc >= 0)
   8020 		return i915_modparams.panel_use_ssc != 0;
   8021 	return dev_priv->vbt.lvds_use_ssc
   8022 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
   8023 }
   8024 
   8025 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
   8026 {
   8027 	return (1 << dpll->n) << 16 | dpll->m2;
   8028 }
   8029 
   8030 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
   8031 {
   8032 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
   8033 }
   8034 
   8035 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
   8036 				     struct intel_crtc_state *crtc_state,
   8037 				     struct dpll *reduced_clock)
   8038 {
   8039 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   8040 	u32 fp, fp2 = 0;
   8041 
   8042 	if (IS_PINEVIEW(dev_priv)) {
   8043 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
   8044 		if (reduced_clock)
   8045 			fp2 = pnv_dpll_compute_fp(reduced_clock);
   8046 	} else {
   8047 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
   8048 		if (reduced_clock)
   8049 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
   8050 	}
   8051 
   8052 	crtc_state->dpll_hw_state.fp0 = fp;
   8053 
   8054 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
   8055 	    reduced_clock) {
   8056 		crtc_state->dpll_hw_state.fp1 = fp2;
   8057 	} else {
   8058 		crtc_state->dpll_hw_state.fp1 = fp;
   8059 	}
   8060 }
   8061 
/*
 * Recalibrate the PLL B opamp via a fixed DPIO register sequence.
 * The magic values and ordering come from the DPIO programming notes;
 * do not reorder these writes.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte again after the calibration step above. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
   8090 
/* Program the M1/N1 data and link values into the PCH transcoder registers. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
   8103 
   8104 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
   8105 				 enum transcoder transcoder)
   8106 {
   8107 	if (IS_HASWELL(dev_priv))
   8108 		return transcoder == TRANSCODER_EDP;
   8109 
   8110 	/*
   8111 	 * Strictly speaking some registers are available before
   8112 	 * gen7, but we only support DRRS on gen7+
   8113 	 */
   8114 	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
   8115 }
   8116 
/*
 * Program the data/link M/N values into the CPU transcoder registers.
 * On gen5+ the registers are indexed by transcoder; older (G4X-style)
 * hardware indexes them by pipe. The optional m2_n2 set is written only
 * when DRRS is enabled and the transcoder has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
   8150 
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into either
 * the PCH transcoder or the CPU transcoder registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n from crtc_state
	 * (only M1/N1 registers exist there), ignoring the @m_n selection
	 * made above — confirm this is intentional for the M2_N2 case.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
   8175 
/* Compute the VLV DPLL/DPLL_MD control register values into dpll_hw_state. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is only set for pipes other than A. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
   8192 
/* Compute the CHV DPLL/DPLL_MD control register values into dpll_hw_state. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is only set for pipes other than A. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
   8208 
/*
 * Program the VLV DPLL dividers and analog tuning values over DPIO
 * before the PLL is enabled. The write sequence and the magic DPIO
 * values follow the eDP/HDMI DPIO driver vbios notes; do not reorder.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Calibration is enabled in a second write of the same register. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning; the extra bit is set only for DP encoders. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
   8308 
/*
 * Program the CHV DPLL dividers, loop filter and lock-detect settings
 * over DPIO before the PLL is enabled. The loop filter coefficients are
 * chosen per VCO frequency range; the write order must be preserved.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 carries a 22-bit fractional part below the integer bits. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse lock detect is used only without a fractional M2. */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
   8413 
   8414 /**
   8415  * vlv_force_pll_on - forcibly enable just the PLL
   8416  * @dev_priv: i915 private structure
   8417  * @pipe: pipe PLL to enable
   8418  * @dpll: PLL configuration
   8419  *
   8420  * Enable the PLL for @pipe using the supplied @dpll config. To be used
   8421  * in cases where we need the PLL enabled even when @pipe is not going to
   8422  * be enabled.
   8423  */
   8424 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
   8425 		     const struct dpll *dpll)
   8426 {
   8427 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   8428 	struct intel_crtc_state *pipe_config;
   8429 
   8430 	pipe_config = intel_crtc_state_alloc(crtc);
   8431 	if (!pipe_config)
   8432 		return -ENOMEM;
   8433 
   8434 	pipe_config->cpu_transcoder = (enum transcoder)pipe;
   8435 	pipe_config->pixel_multiplier = 1;
   8436 	pipe_config->dpll = *dpll;
   8437 
   8438 	if (IS_CHERRYVIEW(dev_priv)) {
   8439 		chv_compute_dpll(crtc, pipe_config);
   8440 		chv_prepare_pll(crtc, pipe_config);
   8441 		chv_enable_pll(crtc, pipe_config);
   8442 	} else {
   8443 		vlv_compute_dpll(crtc, pipe_config);
   8444 		vlv_prepare_pll(crtc, pipe_config);
   8445 		vlv_enable_pll(crtc, pipe_config);
   8446 	}
   8447 
   8448 	kfree(pipe_config);
   8449 
   8450 	return 0;
   8451 }
   8452 
   8453 /**
   8454  * vlv_force_pll_off - forcibly disable just the PLL
   8455  * @dev_priv: i915 private structure
   8456  * @pipe: pipe PLL to disable
   8457  *
   8458  * Disable the PLL for @pipe. To be used in cases where we need
   8459  * the PLL enabled even when @pipe is not going to be enabled.
   8460  */
   8461 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
   8462 {
   8463 	if (IS_CHERRYVIEW(dev_priv))
   8464 		chv_disable_pll(dev_priv, pipe);
   8465 	else
   8466 		vlv_disable_pll(dev_priv, pipe);
   8467 }
   8468 
/*
 * Compute the gen3/gen4-style DPLL control register value (and, on gen4+,
 * DPLL_MD) into crtc_state->dpll_hw_state, after programming the FP
 * dividers via i9xx_update_pll_dividers().
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Platforms with an SDVO pixel multiplier field in the DPLL. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* The reduced-clock P1 goes in the FPA1 field on G4X. */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread spectrum, or DREF. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carries the pixel multiplier in DPLL_MD instead. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
   8541 
/*
 * Compute the gen2-style DPLL control register value into
 * crtc_state->dpll_hw_state, after programming the FP dividers via
 * i9xx_update_pll_dividers().
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* P1/P2 encoding differs between LVDS and non-LVDS outputs. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to 1 in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Reference clock selection: spread spectrum for SSC LVDS, else DREF. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
   8591 
/*
 * Program the transcoder timing registers (HTOTAL/HBLANK/HSYNC and the
 * vertical counterparts) from the adjusted mode, with the interlace
 * halfline adjustments applied to local copies only.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Wrap a negative shift back into the scanline range. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Registers hold (value - 1); end/total go in the high halfword. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
   8653 
   8654 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
   8655 {
   8656 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   8657 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   8658 	enum pipe pipe = crtc->pipe;
   8659 
   8660 	/* pipesrc controls the size that is scaled from, which should
   8661 	 * always be the user's requested size.
   8662 	 */
   8663 	I915_WRITE(PIPESRC(pipe),
   8664 		   ((crtc_state->pipe_src_w - 1) << 16) |
   8665 		   (crtc_state->pipe_src_h - 1));
   8666 }
   8667 
   8668 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
   8669 {
   8670 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   8671 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   8672 
   8673 	if (IS_GEN(dev_priv, 2))
   8674 		return false;
   8675 
   8676 	if (INTEL_GEN(dev_priv) >= 9 ||
   8677 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
   8678 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
   8679 	else
   8680 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
   8681 }
   8682 
/*
 * Read the CRTC timings back from the transcoder timing registers into
 * pipe_config->hw.adjusted_mode.  Each register packs two values as
 * 16-bit fields, each stored by the hardware as (value - 1).
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* DSI transcoders have no blanking registers to read back. */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = I915_READ(HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = I915_READ(VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* For interlaced modes the mode-set path programs vtotal and
	 * vblank_end reduced by one extra line; re-add it here so the
	 * read-out mode matches what was originally requested. */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
   8727 
   8728 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
   8729 				    struct intel_crtc_state *pipe_config)
   8730 {
   8731 	struct drm_device *dev = crtc->base.dev;
   8732 	struct drm_i915_private *dev_priv = to_i915(dev);
   8733 	u32 tmp;
   8734 
   8735 	tmp = I915_READ(PIPESRC(crtc->pipe));
   8736 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
   8737 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
   8738 
   8739 	pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
   8740 	pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
   8741 }
   8742 
   8743 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
   8744 				 struct intel_crtc_state *pipe_config)
   8745 {
   8746 	mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
   8747 	mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
   8748 	mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
   8749 	mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
   8750 
   8751 	mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
   8752 	mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
   8753 	mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
   8754 	mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
   8755 
   8756 	mode->flags = pipe_config->hw.adjusted_mode.flags;
   8757 	mode->type = DRM_MODE_TYPE_DRIVER;
   8758 
   8759 	mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
   8760 
   8761 	mode->hsync = drm_mode_hsync(mode);
   8762 	mode->vrefresh = drm_mode_vrefresh(mode);
   8763 	drm_mode_set_name(mode);
   8764 }
   8765 
/*
 * Build and write the PIPECONF value for a GMCH-class (pre-ILK style)
 * pipe: enable state, double-wide, bpc/dither, interlace mode, color
 * range, gamma mode and frame-start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Older parts and SDVO outputs need the field-indication
		 * interlace mode; everything else can use sync shift. */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited (16-235) color range selection only exists on VLV/CHV. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
   8826 
   8827 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
   8828 				   struct intel_crtc_state *crtc_state)
   8829 {
   8830 	struct drm_device *dev = crtc->base.dev;
   8831 	struct drm_i915_private *dev_priv = to_i915(dev);
   8832 	const struct intel_limit *limit;
   8833 	int refclk = 48000;
   8834 
   8835 	memset(&crtc_state->dpll_hw_state, 0,
   8836 	       sizeof(crtc_state->dpll_hw_state));
   8837 
   8838 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8839 		if (intel_panel_use_ssc(dev_priv)) {
   8840 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8841 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8842 		}
   8843 
   8844 		limit = &intel_limits_i8xx_lvds;
   8845 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
   8846 		limit = &intel_limits_i8xx_dvo;
   8847 	} else {
   8848 		limit = &intel_limits_i8xx_dac;
   8849 	}
   8850 
   8851 	if (!crtc_state->clock_set &&
   8852 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8853 				 refclk, NULL, &crtc_state->dpll)) {
   8854 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8855 		return -EINVAL;
   8856 	}
   8857 
   8858 	i8xx_compute_dpll(crtc, crtc_state, NULL);
   8859 
   8860 	return 0;
   8861 }
   8862 
   8863 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
   8864 				  struct intel_crtc_state *crtc_state)
   8865 {
   8866 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   8867 	const struct intel_limit *limit;
   8868 	int refclk = 96000;
   8869 
   8870 	memset(&crtc_state->dpll_hw_state, 0,
   8871 	       sizeof(crtc_state->dpll_hw_state));
   8872 
   8873 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8874 		if (intel_panel_use_ssc(dev_priv)) {
   8875 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8876 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8877 		}
   8878 
   8879 		if (intel_is_dual_link_lvds(dev_priv))
   8880 			limit = &intel_limits_g4x_dual_channel_lvds;
   8881 		else
   8882 			limit = &intel_limits_g4x_single_channel_lvds;
   8883 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
   8884 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
   8885 		limit = &intel_limits_g4x_hdmi;
   8886 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
   8887 		limit = &intel_limits_g4x_sdvo;
   8888 	} else {
   8889 		/* The option is for other outputs */
   8890 		limit = &intel_limits_i9xx_sdvo;
   8891 	}
   8892 
   8893 	if (!crtc_state->clock_set &&
   8894 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8895 				refclk, NULL, &crtc_state->dpll)) {
   8896 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8897 		return -EINVAL;
   8898 	}
   8899 
   8900 	i9xx_compute_dpll(crtc, crtc_state, NULL);
   8901 
   8902 	return 0;
   8903 }
   8904 
   8905 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
   8906 				  struct intel_crtc_state *crtc_state)
   8907 {
   8908 	struct drm_device *dev = crtc->base.dev;
   8909 	struct drm_i915_private *dev_priv = to_i915(dev);
   8910 	const struct intel_limit *limit;
   8911 	int refclk = 96000;
   8912 
   8913 	memset(&crtc_state->dpll_hw_state, 0,
   8914 	       sizeof(crtc_state->dpll_hw_state));
   8915 
   8916 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8917 		if (intel_panel_use_ssc(dev_priv)) {
   8918 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8919 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8920 		}
   8921 
   8922 		limit = &pnv_limits_lvds;
   8923 	} else {
   8924 		limit = &pnv_limits_sdvo;
   8925 	}
   8926 
   8927 	if (!crtc_state->clock_set &&
   8928 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8929 				refclk, NULL, &crtc_state->dpll)) {
   8930 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8931 		return -EINVAL;
   8932 	}
   8933 
   8934 	i9xx_compute_dpll(crtc, crtc_state, NULL);
   8935 
   8936 	return 0;
   8937 }
   8938 
   8939 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
   8940 				   struct intel_crtc_state *crtc_state)
   8941 {
   8942 	struct drm_device *dev = crtc->base.dev;
   8943 	struct drm_i915_private *dev_priv = to_i915(dev);
   8944 	const struct intel_limit *limit;
   8945 	int refclk = 96000;
   8946 
   8947 	memset(&crtc_state->dpll_hw_state, 0,
   8948 	       sizeof(crtc_state->dpll_hw_state));
   8949 
   8950 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
   8951 		if (intel_panel_use_ssc(dev_priv)) {
   8952 			refclk = dev_priv->vbt.lvds_ssc_freq;
   8953 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
   8954 		}
   8955 
   8956 		limit = &intel_limits_i9xx_lvds;
   8957 	} else {
   8958 		limit = &intel_limits_i9xx_sdvo;
   8959 	}
   8960 
   8961 	if (!crtc_state->clock_set &&
   8962 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8963 				 refclk, NULL, &crtc_state->dpll)) {
   8964 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8965 		return -EINVAL;
   8966 	}
   8967 
   8968 	i9xx_compute_dpll(crtc, crtc_state, NULL);
   8969 
   8970 	return 0;
   8971 }
   8972 
   8973 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
   8974 				  struct intel_crtc_state *crtc_state)
   8975 {
   8976 	int refclk = 100000;
   8977 	const struct intel_limit *limit = &intel_limits_chv;
   8978 
   8979 	memset(&crtc_state->dpll_hw_state, 0,
   8980 	       sizeof(crtc_state->dpll_hw_state));
   8981 
   8982 	if (!crtc_state->clock_set &&
   8983 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   8984 				refclk, NULL, &crtc_state->dpll)) {
   8985 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   8986 		return -EINVAL;
   8987 	}
   8988 
   8989 	chv_compute_dpll(crtc, crtc_state);
   8990 
   8991 	return 0;
   8992 }
   8993 
   8994 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
   8995 				  struct intel_crtc_state *crtc_state)
   8996 {
   8997 	int refclk = 100000;
   8998 	const struct intel_limit *limit = &intel_limits_vlv;
   8999 
   9000 	memset(&crtc_state->dpll_hw_state, 0,
   9001 	       sizeof(crtc_state->dpll_hw_state));
   9002 
   9003 	if (!crtc_state->clock_set &&
   9004 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
   9005 				refclk, NULL, &crtc_state->dpll)) {
   9006 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
   9007 		return -EINVAL;
   9008 	}
   9009 
   9010 	vlv_compute_dpll(crtc, crtc_state);
   9011 
   9012 	return 0;
   9013 }
   9014 
   9015 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
   9016 {
   9017 	if (IS_I830(dev_priv))
   9018 		return false;
   9019 
   9020 	return INTEL_GEN(dev_priv) >= 4 ||
   9021 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
   9022 }
   9023 
   9024 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
   9025 				 struct intel_crtc_state *pipe_config)
   9026 {
   9027 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9028 	u32 tmp;
   9029 
   9030 	if (!i9xx_has_pfit(dev_priv))
   9031 		return;
   9032 
   9033 	tmp = I915_READ(PFIT_CONTROL);
   9034 	if (!(tmp & PFIT_ENABLE))
   9035 		return;
   9036 
   9037 	/* Check whether the pfit is attached to our pipe. */
   9038 	if (INTEL_GEN(dev_priv) < 4) {
   9039 		if (crtc->pipe != PIPE_B)
   9040 			return;
   9041 	} else {
   9042 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
   9043 			return;
   9044 	}
   9045 
   9046 	pipe_config->gmch_pfit.control = tmp;
   9047 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
   9048 }
   9049 
/*
 * Read the VLV DPLL divider fields back from DPIO and recompute the
 * resulting port clock (kHz) into pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV reference clock is 100 MHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the m/n/p divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
   9076 
/*
 * Read back the primary plane's framebuffer configuration (format,
 * tiling, rotation, base address, size, stride) left programmed by the
 * BIOS/bootloader, so the boot framebuffer can be inherited.  On any
 * failure (plane disabled, allocation failure) plane_config->fb is
 * simply left unset.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and 180-degree rotation bits only exist on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset register layout differs per
	 * generation.  NOTE(review): offset is read but not used
	 * further below. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Derive the fb size from the pipe source size ((dim - 1)
	 * encoding). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
   9159 
/*
 * Read the CHV DPLL divider fields back from DPIO and recompute the
 * resulting port clock (kHz) into pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* CHV reference clock is 100 MHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 is a fixed-point value: integer part from DW0, 22-bit
	 * fractional part from DW2 when fractional divide is enabled. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
   9193 
   9194 static enum intel_output_format
   9195 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
   9196 {
   9197 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9198 	u32 tmp;
   9199 
   9200 	tmp = I915_READ(PIPEMISC(crtc->pipe));
   9201 
   9202 	if (tmp & PIPEMISC_YUV420_ENABLE) {
   9203 		/* We support 4:2:0 in full blend mode only */
   9204 		WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
   9205 
   9206 		return INTEL_OUTPUT_FORMAT_YCBCR420;
   9207 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
   9208 		return INTEL_OUTPUT_FORMAT_YCBCR444;
   9209 	} else {
   9210 		return INTEL_OUTPUT_FORMAT_RGB;
   9211 	}
   9212 }
   9213 
   9214 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
   9215 {
   9216 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9217 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
   9218 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9219 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
   9220 	u32 tmp;
   9221 
   9222 	tmp = I915_READ(DSPCNTR(i9xx_plane));
   9223 
   9224 	if (tmp & DISPPLANE_GAMMA_ENABLE)
   9225 		crtc_state->gamma_enable = true;
   9226 
   9227 	if (!HAS_GMCH(dev_priv) &&
   9228 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
   9229 		crtc_state->csc_enable = true;
   9230 }
   9231 
/*
 * Read the full hardware state of a GMCH-class pipe into @pipe_config.
 * Takes a power-domain reference for the pipe while reading; returns
 * true if the pipe is powered and enabled, false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Don't touch the registers if the pipe's power well is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* Defaults for fields that GMCH hardware has no state for. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x and later have bpc fields in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Recover the pixel multiplier; where it lives depends on the
	 * generation. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recompute port_clock from the recovered dividers. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   9350 
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) for
 * Ironlake-class PCHs.  First scans the encoders and active DPLLs to
 * decide the desired final state (nonspread source, SSC source, CPU
 * eDP source output), then walks the hardware from the current state
 * to that final state one source at a time, with the mandated delays
 * between steps.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX an external CK505 clock chip may provide the reference;
	 * SSC is only usable when it does.  Later PCHs always allow SSC. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but an enabled DPLL is consuming SSC: keep
		 * the SSC source running. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* SSC can only be shut down when no DPLL still uses it. */
		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise walk above must have converged on 'final'. */
	BUG_ON(val != final);
}
   9517 
   9518 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
   9519 {
   9520 	u32 tmp;
   9521 
   9522 	tmp = I915_READ(SOUTH_CHICKEN2);
   9523 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
   9524 	I915_WRITE(SOUTH_CHICKEN2, tmp);
   9525 
   9526 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
   9527 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
   9528 		DRM_ERROR("FDI mPHY reset assert timeout\n");
   9529 
   9530 	tmp = I915_READ(SOUTH_CHICKEN2);
   9531 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
   9532 	I915_WRITE(SOUTH_CHICKEN2, tmp);
   9533 
   9534 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
   9535 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
   9536 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
   9537 }
   9538 
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the SBI MPHY port.
 * The offsets come in 0x20xx/0x21xx pairs that receive identical
 * values -- presumably one register per FDI channel (confirm against
 * BSpec). The offsets and magic values are those prescribed by the
 * workaround; do not alter them.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
   9613 
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize inconsistent combinations: FDI needs downspread,
	 * and LP PCH has no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the path alternate (bypass)
	 * bit set for now; the 24 us settle delay is part of the sequence. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear the path-alternate bit to actually get spread. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses SBI_GEN0 in place of SBI_DBUFF0 for this bit. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
   9658 
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* LP PCH uses SBI_GEN0 in place of SBI_DBUFF0 for this bit. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* First put the path into bypass (with a 32 us settle
		 * delay), then disable the SSC block itself. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
   9684 
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE settings per bend amount, indexed via BEND_IDX().
 * Each register value covers two adjacent 5-step entries; the values
 * themselves are opaque divider/phase settings (presumably from BSpec).
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
   9710 
   9711 /*
   9712  * Bend CLKOUT_DP
   9713  * steps -50 to 50 inclusive, in steps of 5
   9714  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
   9715  * change in clock period = -(steps / 10) * 5.787 ps
   9716  */
   9717 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
   9718 {
   9719 	u32 tmp;
   9720 	int idx = BEND_IDX(steps);
   9721 
   9722 	if (WARN_ON(steps % 5 != 0))
   9723 		return;
   9724 
   9725 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
   9726 		return;
   9727 
   9728 	mutex_lock(&dev_priv->sb_lock);
   9729 
   9730 	if (steps % 10 != 0)
   9731 		tmp = 0xAAAAAAAB;
   9732 	else
   9733 		tmp = 0x00000000;
   9734 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
   9735 
   9736 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
   9737 	tmp &= 0xffff0000;
   9738 	tmp |= sscdivintphase[idx];
   9739 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
   9740 
   9741 	mutex_unlock(&dev_priv->sb_lock);
   9742 }
   9743 
   9744 #undef BEND_IDX
   9745 
   9746 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
   9747 {
   9748 	u32 fuse_strap = I915_READ(FUSE_STRAP);
   9749 	u32 ctl = I915_READ(SPLL_CTL);
   9750 
   9751 	if ((ctl & SPLL_PLL_ENABLE) == 0)
   9752 		return false;
   9753 
   9754 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
   9755 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
   9756 		return true;
   9757 
   9758 	if (IS_BROADWELL(dev_priv) &&
   9759 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
   9760 		return true;
   9761 
   9762 	return false;
   9763 }
   9764 
   9765 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
   9766 			       enum intel_dpll_id id)
   9767 {
   9768 	u32 fuse_strap = I915_READ(FUSE_STRAP);
   9769 	u32 ctl = I915_READ(WRPLL_CTL(id));
   9770 
   9771 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
   9772 		return false;
   9773 
   9774 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
   9775 		return true;
   9776 
   9777 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
   9778 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
   9779 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
   9780 		return true;
   9781 
   9782 	return false;
   9783 }
   9784 
   9785 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
   9786 {
   9787 	struct intel_encoder *encoder;
   9788 	bool has_fdi = false;
   9789 
   9790 	for_each_intel_encoder(&dev_priv->drm, encoder) {
   9791 		switch (encoder->type) {
   9792 		case INTEL_OUTPUT_ANALOG:
   9793 			has_fdi = true;
   9794 			break;
   9795 		default:
   9796 			break;
   9797 		}
   9798 	}
   9799 
   9800 	/*
   9801 	 * The BIOS may have decided to use the PCH SSC
   9802 	 * reference so we must not disable it until the
   9803 	 * relevant PLLs have stopped relying on it. We'll
   9804 	 * just leave the PCH SSC reference enabled in case
   9805 	 * any active PLL is using it. It will get disabled
   9806 	 * after runtime suspend if we don't have FDI.
   9807 	 *
   9808 	 * TODO: Move the whole reference clock handling
   9809 	 * to the modeset sequence proper so that we can
   9810 	 * actually enable/disable/reconfigure these things
   9811 	 * safely. To do that we need to introduce a real
   9812 	 * clock hierarchy. That would also allow us to do
   9813 	 * clock bending finally.
   9814 	 */
   9815 	dev_priv->pch_ssc_use = 0;
   9816 
   9817 	if (spll_uses_pch_ssc(dev_priv)) {
   9818 		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
   9819 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
   9820 	}
   9821 
   9822 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
   9823 		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
   9824 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
   9825 	}
   9826 
   9827 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
   9828 		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
   9829 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
   9830 	}
   9831 
   9832 	if (dev_priv->pch_ssc_use)
   9833 		return;
   9834 
   9835 	if (has_fdi) {
   9836 		lpt_bend_clkout_dp(dev_priv, 0);
   9837 		lpt_enable_clkout_dp(dev_priv, true, true);
   9838 	} else {
   9839 		lpt_disable_clkout_dp(dev_priv);
   9840 	}
   9841 }
   9842 
   9843 /*
   9844  * Initialize reference clocks when the driver loads
   9845  */
   9846 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
   9847 {
   9848 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
   9849 		ilk_init_pch_refclk(dev_priv);
   9850 	else if (HAS_PCH_LPT(dev_priv))
   9851 		lpt_init_pch_refclk(dev_priv);
   9852 }
   9853 
   9854 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
   9855 {
   9856 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9857 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9858 	enum pipe pipe = crtc->pipe;
   9859 	u32 val;
   9860 
   9861 	val = 0;
   9862 
   9863 	switch (crtc_state->pipe_bpp) {
   9864 	case 18:
   9865 		val |= PIPECONF_6BPC;
   9866 		break;
   9867 	case 24:
   9868 		val |= PIPECONF_8BPC;
   9869 		break;
   9870 	case 30:
   9871 		val |= PIPECONF_10BPC;
   9872 		break;
   9873 	case 36:
   9874 		val |= PIPECONF_12BPC;
   9875 		break;
   9876 	default:
   9877 		/* Case prevented by intel_choose_pipe_bpp_dither. */
   9878 		BUG();
   9879 	}
   9880 
   9881 	if (crtc_state->dither)
   9882 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
   9883 
   9884 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
   9885 		val |= PIPECONF_INTERLACED_ILK;
   9886 	else
   9887 		val |= PIPECONF_PROGRESSIVE;
   9888 
   9889 	/*
   9890 	 * This would end up with an odd purple hue over
   9891 	 * the entire display. Make sure we don't do it.
   9892 	 */
   9893 	WARN_ON(crtc_state->limited_color_range &&
   9894 		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
   9895 
   9896 	if (crtc_state->limited_color_range)
   9897 		val |= PIPECONF_COLOR_RANGE_SELECT;
   9898 
   9899 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
   9900 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
   9901 
   9902 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
   9903 
   9904 	val |= PIPECONF_FRAME_START_DELAY(0);
   9905 
   9906 	I915_WRITE(PIPECONF(pipe), val);
   9907 	POSTING_READ(PIPECONF(pipe));
   9908 }
   9909 
   9910 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
   9911 {
   9912 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9913 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9914 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   9915 	u32 val = 0;
   9916 
   9917 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
   9918 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
   9919 
   9920 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
   9921 		val |= PIPECONF_INTERLACED_ILK;
   9922 	else
   9923 		val |= PIPECONF_PROGRESSIVE;
   9924 
   9925 	if (IS_HASWELL(dev_priv) &&
   9926 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
   9927 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
   9928 
   9929 	I915_WRITE(PIPECONF(cpu_transcoder), val);
   9930 	POSTING_READ(PIPECONF(cpu_transcoder));
   9931 }
   9932 
   9933 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
   9934 {
   9935 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   9936 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9937 	u32 val = 0;
   9938 
   9939 	switch (crtc_state->pipe_bpp) {
   9940 	case 18:
   9941 		val |= PIPEMISC_DITHER_6_BPC;
   9942 		break;
   9943 	case 24:
   9944 		val |= PIPEMISC_DITHER_8_BPC;
   9945 		break;
   9946 	case 30:
   9947 		val |= PIPEMISC_DITHER_10_BPC;
   9948 		break;
   9949 	case 36:
   9950 		val |= PIPEMISC_DITHER_12_BPC;
   9951 		break;
   9952 	default:
   9953 		MISSING_CASE(crtc_state->pipe_bpp);
   9954 		break;
   9955 	}
   9956 
   9957 	if (crtc_state->dither)
   9958 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
   9959 
   9960 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
   9961 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
   9962 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
   9963 
   9964 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
   9965 		val |= PIPEMISC_YUV420_ENABLE |
   9966 			PIPEMISC_YUV420_MODE_FULL_BLEND;
   9967 
   9968 	if (INTEL_GEN(dev_priv) >= 11 &&
   9969 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
   9970 					   BIT(PLANE_CURSOR))) == 0)
   9971 		val |= PIPEMISC_HDR_MODE_PRECISION;
   9972 
   9973 	I915_WRITE(PIPEMISC(crtc->pipe), val);
   9974 }
   9975 
   9976 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
   9977 {
   9978 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   9979 	u32 tmp;
   9980 
   9981 	tmp = I915_READ(PIPEMISC(crtc->pipe));
   9982 
   9983 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
   9984 	case PIPEMISC_DITHER_6_BPC:
   9985 		return 18;
   9986 	case PIPEMISC_DITHER_8_BPC:
   9987 		return 24;
   9988 	case PIPEMISC_DITHER_10_BPC:
   9989 		return 30;
   9990 	case PIPEMISC_DITHER_12_BPC:
   9991 		return 36;
   9992 	default:
   9993 		MISSING_CASE(tmp);
   9994 		return 0;
   9995 	}
   9996 }
   9997 
/*
 * Compute the number of FDI lanes needed to carry @target_clock at
 * @bpp bits per pixel over a link running at @link_bw.
 *
 * Account for spread spectrum to avoid oversubscribing the link:
 * max center spread is 2.5%; use 5% for safety's sake.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bw = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + lane_bw - 1) / lane_bw;
}
   10008 
   10009 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
   10010 {
   10011 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
   10012 }
   10013 
/*
 * Compute the ILK-style DPLL, FP0 and FP1 register values for
 * @crtc_state (using @reduced_clock for FP1 when given, otherwise
 * FP1 mirrors FP0) and store them in crtc_state->dpll_hw_state.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 uses the reduced clock divisors when one is provided. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference select: spread spectrum only for SSC-using LVDS. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
   10115 
/*
 * Compute DPLL dividers for an ILK-style pipe and reserve a shared
 * DPLL for it. Returns 0 on success, -EINVAL when no suitable divider
 * settings or no free PLL could be found.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Pick divider limits based on output type, refclk and LVDS link count. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* Respect user-set dividers (clock_set), otherwise search. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
   10171 
/*
 * Read back the PCH transcoder link M1/N1 and data M1/N1 values for
 * @crtc's pipe into @m_n (used for state readout).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* The data M register holds both the M value and the TU size. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
   10187 
/*
 * Read back link M/N and data M/N for @transcoder into @m_n and, when
 * @m2_n2 is non-NULL and the transcoder has a second set, the M2/N2
 * values as well. Gen < 5 uses the per-pipe G4X register layout.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* The data M register holds both the M value and the TU size. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
   10224 
   10225 void intel_dp_get_m_n(struct intel_crtc *crtc,
   10226 		      struct intel_crtc_state *pipe_config)
   10227 {
   10228 	if (pipe_config->has_pch_encoder)
   10229 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
   10230 	else
   10231 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
   10232 					     &pipe_config->dp_m_n,
   10233 					     &pipe_config->dp_m2_n2);
   10234 }
   10235 
/* Read back the FDI link M/N values (no second set) for state readout. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
   10242 
   10243 static void skl_get_pfit_config(struct intel_crtc *crtc,
   10244 				struct intel_crtc_state *pipe_config)
   10245 {
   10246 	struct drm_device *dev = crtc->base.dev;
   10247 	struct drm_i915_private *dev_priv = to_i915(dev);
   10248 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
   10249 	u32 ps_ctrl = 0;
   10250 	int id = -1;
   10251 	int i;
   10252 
   10253 	/* find scaler attached to this pipe */
   10254 	for (i = 0; i < crtc->num_scalers; i++) {
   10255 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
   10256 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
   10257 			id = i;
   10258 			pipe_config->pch_pfit.enabled = true;
   10259 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
   10260 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
   10261 			scaler_state->scalers[i].in_use = true;
   10262 			break;
   10263 		}
   10264 	}
   10265 
   10266 	scaler_state->scaler_id = id;
   10267 	if (id >= 0) {
   10268 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
   10269 	} else {
   10270 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
   10271 	}
   10272 }
   10273 
/*
 * Read back the hardware state of the primary plane and reconstruct an
 * intel_initial_plane_config (with framebuffer metadata) from it, so
 * the firmware framebuffer can be taken over. On any failure the fb is
 * freed and plane_config->fb is left unset.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is off. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The pixel format field mask changed on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode lives in PLANE_COLOR_CTL on glk/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a fb modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* The low 12 bits of PLANE_SURF are not part of the address. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE encodes (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
   10404 
   10405 static void ilk_get_pfit_config(struct intel_crtc *crtc,
   10406 				struct intel_crtc_state *pipe_config)
   10407 {
   10408 	struct drm_device *dev = crtc->base.dev;
   10409 	struct drm_i915_private *dev_priv = to_i915(dev);
   10410 	u32 tmp;
   10411 
   10412 	tmp = I915_READ(PF_CTL(crtc->pipe));
   10413 
   10414 	if (tmp & PF_ENABLE) {
   10415 		pipe_config->pch_pfit.enabled = true;
   10416 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
   10417 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
   10418 
   10419 		/* We currently do not free assignements of panel fitters on
   10420 		 * ivb/hsw (since we don't use the higher upscaling modes which
   10421 		 * differentiates them) so just WARN about this case for now. */
   10422 		if (IS_GEN(dev_priv, 7)) {
   10423 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
   10424 				PF_PIPE_SEL_IVB(crtc->pipe));
   10425 		}
   10426 	}
   10427 }
   10428 
/*
 * Read out the hardware state of an ILK-style (PCH based) pipe into
 * @pipe_config.  Returns true iff the pipe is enabled.  Takes the
 * pipe's display power domain for the duration of the readout and
 * bails out immediately (returning false) if the power well is off.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* On ILK the pipe -> cpu transcoder mapping is fixed 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode total pipe bpp (3 channels) from the PIPECONF BPC field. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* An enabled PCH transcoder means we're driving a PCH port. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* On CPT/PPT the PLL selection is in PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		/* The pixel multiplier is read back from the PCH DPLL. */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ilk_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   10546 
   10547 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
   10548 				  struct intel_crtc_state *crtc_state)
   10549 {
   10550 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   10551 	struct intel_atomic_state *state =
   10552 		to_intel_atomic_state(crtc_state->uapi.state);
   10553 
   10554 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
   10555 	    INTEL_GEN(dev_priv) >= 11) {
   10556 		struct intel_encoder *encoder =
   10557 			intel_get_crtc_new_encoder(state, crtc_state);
   10558 
   10559 		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
   10560 			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
   10561 				      pipe_name(crtc->pipe));
   10562 			return -EINVAL;
   10563 		}
   10564 	}
   10565 
   10566 	return 0;
   10567 }
   10568 
   10569 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
   10570 			    struct intel_crtc_state *pipe_config)
   10571 {
   10572 	enum intel_dpll_id id;
   10573 	u32 temp;
   10574 
   10575 	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
   10576 	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
   10577 
   10578 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
   10579 		return;
   10580 
   10581 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10582 }
   10583 
   10584 static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
   10585 			    struct intel_crtc_state *pipe_config)
   10586 {
   10587 	enum phy phy = intel_port_to_phy(dev_priv, port);
   10588 	enum icl_port_dpll_id port_dpll_id;
   10589 	enum intel_dpll_id id;
   10590 	u32 temp;
   10591 
   10592 	if (intel_phy_is_combo(dev_priv, phy)) {
   10593 		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
   10594 			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
   10595 		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
   10596 		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
   10597 	} else if (intel_phy_is_tc(dev_priv, phy)) {
   10598 		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
   10599 
   10600 		if (clk_sel == DDI_CLK_SEL_MG) {
   10601 			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
   10602 								    port));
   10603 			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
   10604 		} else {
   10605 			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
   10606 			id = DPLL_ID_ICL_TBTPLL;
   10607 			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
   10608 		}
   10609 	} else {
   10610 		WARN(1, "Invalid port %x\n", port);
   10611 		return;
   10612 	}
   10613 
   10614 	pipe_config->icl_port_dplls[port_dpll_id].pll =
   10615 		intel_get_shared_dpll_by_id(dev_priv, id);
   10616 
   10617 	icl_set_active_port_dpll(pipe_config, port_dpll_id);
   10618 }
   10619 
   10620 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
   10621 				enum port port,
   10622 				struct intel_crtc_state *pipe_config)
   10623 {
   10624 	enum intel_dpll_id id;
   10625 
   10626 	switch (port) {
   10627 	case PORT_A:
   10628 		id = DPLL_ID_SKL_DPLL0;
   10629 		break;
   10630 	case PORT_B:
   10631 		id = DPLL_ID_SKL_DPLL1;
   10632 		break;
   10633 	case PORT_C:
   10634 		id = DPLL_ID_SKL_DPLL2;
   10635 		break;
   10636 	default:
   10637 		DRM_ERROR("Incorrect port type\n");
   10638 		return;
   10639 	}
   10640 
   10641 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10642 }
   10643 
   10644 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
   10645 			    struct intel_crtc_state *pipe_config)
   10646 {
   10647 	enum intel_dpll_id id;
   10648 	u32 temp;
   10649 
   10650 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
   10651 	id = temp >> (port * 3 + 1);
   10652 
   10653 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
   10654 		return;
   10655 
   10656 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10657 }
   10658 
   10659 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
   10660 			    struct intel_crtc_state *pipe_config)
   10661 {
   10662 	enum intel_dpll_id id;
   10663 	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
   10664 
   10665 	switch (ddi_pll_sel) {
   10666 	case PORT_CLK_SEL_WRPLL1:
   10667 		id = DPLL_ID_WRPLL1;
   10668 		break;
   10669 	case PORT_CLK_SEL_WRPLL2:
   10670 		id = DPLL_ID_WRPLL2;
   10671 		break;
   10672 	case PORT_CLK_SEL_SPLL:
   10673 		id = DPLL_ID_SPLL;
   10674 		break;
   10675 	case PORT_CLK_SEL_LCPLL_810:
   10676 		id = DPLL_ID_LCPLL_810;
   10677 		break;
   10678 	case PORT_CLK_SEL_LCPLL_1350:
   10679 		id = DPLL_ID_LCPLL_1350;
   10680 		break;
   10681 	case PORT_CLK_SEL_LCPLL_2700:
   10682 		id = DPLL_ID_LCPLL_2700;
   10683 		break;
   10684 	default:
   10685 		MISSING_CASE(ddi_pll_sel);
   10686 		/* fall through */
   10687 	case PORT_CLK_SEL_NONE:
   10688 		return;
   10689 	}
   10690 
   10691 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
   10692 }
   10693 
/*
 * Determine which cpu transcoder feeds @crtc (handling the non-1:1
 * eDP/DSI transcoder mappings) and whether it is enabled.  A successfully
 * acquired transcoder power reference is recorded in @wakerefs and
 * @power_domain_mask; the caller is responsible for releasing it.
 * Returns true iff the transcoder is powered and the pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* DSI transcoders only exist on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Map this transcoder's EDP input select back to a pipe. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Hand the transcoder power reference to the caller for release. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
   10792 
/*
 * Check whether one of the BXT DSI transcoders is driving @crtc.  Any
 * transcoder power reference acquired along the way is recorded in
 * @wakerefs/@power_domain_mask for the caller to release.  Returns true
 * iff a DSI transcoder was found and assigned to @pipe_config.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	/* BXT DSI lives on ports A and C only. */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip DSI ports that are routed to some other pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
   10847 
/*
 * Read out which DDI port and shared DPLL drive the cpu transcoder
 * already stored in @pipe_config, dispatching to the platform-specific
 * PLL readout helper, and detect an enabled PCH/FDI encoder on HSW/BDW.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port assignment. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		/* Otherwise the port is encoded in TRANS_DDI_FUNC_CTL. */
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
   10901 
   10902 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
   10903 						 enum transcoder cpu_transcoder)
   10904 {
   10905 	u32 trans_port_sync, master_select;
   10906 
   10907 	trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
   10908 
   10909 	if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
   10910 		return INVALID_TRANSCODER;
   10911 
   10912 	master_select = trans_port_sync &
   10913 			PORT_SYNC_MODE_MASTER_SELECT_MASK;
   10914 	if (master_select == 0)
   10915 		return TRANSCODER_EDP;
   10916 	else
   10917 		return master_select - 1;
   10918 }
   10919 
/*
 * Read out the transcoder port sync topology for @crtc_state: which
 * transcoder (if any) is our master, and the mask of transcoders that
 * are slaved to us.
 */
static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 transcoders;
	enum transcoder cpu_transcoder;

	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
								  crtc_state->cpu_transcoder);

	/* Scan all regular transcoders for ones whose master is us. */
	transcoders = BIT(TRANSCODER_A) |
		BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) |
		BIT(TRANSCODER_D);
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t trans_wakeref;

		/* Only powered transcoders can be read; skip the rest. */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
								   power_domain);

		if (!trans_wakeref)
			continue;

		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
		    crtc_state->cpu_transcoder)
			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
	}

	/* A transcoder must not be both a master and a slave. */
	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}
   10954 
/*
 * Read out the full hardware state of a HSW+ (DDI based) pipe into
 * @pipe_config.  Returns true iff the pipe is active.  Power domains are
 * acquired as needed during readout (tracked in power_domain_mask and
 * wakerefs) and are all released again before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On BXT a DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* Pre-gen11 DSI has no DDI port/pipe timings to read out here. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	/* The panel fitter has its own power well; read it only if powered. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	WARN_ON(power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(crtc, pipe_config);
		else
			ilk_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (INTEL_GEN(dev_priv) >= 11 &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
		icl_get_trans_port_sync_config(pipe_config);

out:
	/* Release every power domain reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
   11085 
   11086 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
   11087 {
   11088 	struct drm_i915_private *dev_priv =
   11089 		to_i915(plane_state->uapi.plane->dev);
   11090 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   11091 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
   11092 	u32 base;
   11093 
   11094 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
   11095 		base = sg_dma_address(obj->mm.pages->sgl);
   11096 	else
   11097 		base = intel_plane_ggtt_offset(plane_state);
   11098 
   11099 	return base + plane_state->color_plane[0].offset;
   11100 }
   11101 
   11102 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
   11103 {
   11104 	int x = plane_state->uapi.dst.x1;
   11105 	int y = plane_state->uapi.dst.y1;
   11106 	u32 pos = 0;
   11107 
   11108 	if (x < 0) {
   11109 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
   11110 		x = -x;
   11111 	}
   11112 	pos |= x << CURSOR_X_SHIFT;
   11113 
   11114 	if (y < 0) {
   11115 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
   11116 		y = -y;
   11117 	}
   11118 	pos |= y << CURSOR_Y_SHIFT;
   11119 
   11120 	return pos;
   11121 }
   11122 
   11123 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
   11124 {
   11125 	const struct drm_mode_config *config =
   11126 		&plane_state->uapi.plane->dev->mode_config;
   11127 	int width = drm_rect_width(&plane_state->uapi.dst);
   11128 	int height = drm_rect_height(&plane_state->uapi.dst);
   11129 
   11130 	return width > 0 && width <= config->cursor_width &&
   11131 		height > 0 && height <= config->cursor_height;
   11132 }
   11133 
/*
 * Compute the cursor surface offset and final src coordinates.  Cursors
 * cannot be panned within the fb: after offset alignment the src origin
 * must land exactly on (0,0).  Returns 0 or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel so 180° rotation scans backwards. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
   11184 
   11185 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
   11186 			      struct intel_plane_state *plane_state)
   11187 {
   11188 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   11189 	int ret;
   11190 
   11191 	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
   11192 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
   11193 		return -EINVAL;
   11194 	}
   11195 
   11196 	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
   11197 						  &crtc_state->uapi,
   11198 						  DRM_PLANE_HELPER_NO_SCALING,
   11199 						  DRM_PLANE_HELPER_NO_SCALING,
   11200 						  true, true);
   11201 	if (ret)
   11202 		return ret;
   11203 
   11204 	/* Use the unclipped src/dst rectangles, which we program to hw */
   11205 	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
   11206 	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
   11207 
   11208 	ret = intel_cursor_check_surface(plane_state);
   11209 	if (ret)
   11210 		return ret;
   11211 
   11212 	if (!plane_state->uapi.visible)
   11213 		return 0;
   11214 
   11215 	ret = intel_plane_check_src_coordinates(plane_state);
   11216 	if (ret)
   11217 		return ret;
   11218 
   11219 	return 0;
   11220 }
   11221 
   11222 static unsigned int
   11223 i845_cursor_max_stride(struct intel_plane *plane,
   11224 		       u32 pixel_format, u64 modifier,
   11225 		       unsigned int rotation)
   11226 {
   11227 	return 2048;
   11228 }
   11229 
   11230 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
   11231 {
   11232 	u32 cntl = 0;
   11233 
   11234 	if (crtc_state->gamma_enable)
   11235 		cntl |= CURSOR_GAMMA_ENABLE;
   11236 
   11237 	return cntl;
   11238 }
   11239 
   11240 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
   11241 			   const struct intel_plane_state *plane_state)
   11242 {
   11243 	return CURSOR_ENABLE |
   11244 		CURSOR_FORMAT_ARGB |
   11245 		CURSOR_STRIDE(plane_state->color_plane[0].stride);
   11246 }
   11247 
   11248 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
   11249 {
   11250 	int width = drm_rect_width(&plane_state->uapi.dst);
   11251 
   11252 	/*
   11253 	 * 845g/865g are only limited by the width of their cursors,
   11254 	 * the height is arbitrary up to the precision of the register.
   11255 	 */
   11256 	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
   11257 }
   11258 
/*
 * 845g/865g cursor plane check: run the common cursor checks, then
 * validate the chip-specific size and stride constraints and precompute
 * the cursor control register value.  Returns 0 or a negative error code.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Only these four strides are accepted by the hardware. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
   11300 
/*
 * Program the 845g/865g cursor registers.  Called with @plane_state ==
 * NULL (or invisible) to disable the cursor.  MMIO is done under the
 * uncore lock so the write sequence is not interleaved with other
 * register access.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		/* CURSIZE packs height above bit 12, width in the low bits. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable, rewrite everything, then re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no disable dance required. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   11345 
/* Turn off the i845/i865 cursor by programming an all-zero cursor state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
   11351 
   11352 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
   11353 				     enum pipe *pipe)
   11354 {
   11355 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   11356 	enum intel_display_power_domain power_domain;
   11357 	intel_wakeref_t wakeref;
   11358 	bool ret;
   11359 
   11360 	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
   11361 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
   11362 	if (!wakeref)
   11363 		return false;
   11364 
   11365 	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
   11366 
   11367 	*pipe = PIPE_A;
   11368 
   11369 	intel_display_power_put(dev_priv, power_domain, wakeref);
   11370 
   11371 	return ret;
   11372 }
   11373 
   11374 static unsigned int
   11375 i9xx_cursor_max_stride(struct intel_plane *plane,
   11376 		       u32 pixel_format, u64 modifier,
   11377 		       unsigned int rotation)
   11378 {
   11379 	return plane->base.dev->mode_config.cursor_width * 4;
   11380 }
   11381 
   11382 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
   11383 {
   11384 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   11385 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   11386 	u32 cntl = 0;
   11387 
   11388 	if (INTEL_GEN(dev_priv) >= 11)
   11389 		return cntl;
   11390 
   11391 	if (crtc_state->gamma_enable)
   11392 		cntl = MCURSOR_GAMMA_ENABLE;
   11393 
   11394 	if (crtc_state->csc_enable)
   11395 		cntl |= MCURSOR_PIPE_CSC_ENABLE;
   11396 
   11397 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
   11398 		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
   11399 
   11400 	return cntl;
   11401 }
   11402 
   11403 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
   11404 			   const struct intel_plane_state *plane_state)
   11405 {
   11406 	struct drm_i915_private *dev_priv =
   11407 		to_i915(plane_state->uapi.plane->dev);
   11408 	u32 cntl = 0;
   11409 
   11410 	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
   11411 		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
   11412 
   11413 	switch (drm_rect_width(&plane_state->uapi.dst)) {
   11414 	case 64:
   11415 		cntl |= MCURSOR_MODE_64_ARGB_AX;
   11416 		break;
   11417 	case 128:
   11418 		cntl |= MCURSOR_MODE_128_ARGB_AX;
   11419 		break;
   11420 	case 256:
   11421 		cntl |= MCURSOR_MODE_256_ARGB_AX;
   11422 		break;
   11423 	default:
   11424 		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
   11425 		return 0;
   11426 	}
   11427 
   11428 	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
   11429 		cntl |= MCURSOR_ROTATE_180;
   11430 
   11431 	return cntl;
   11432 }
   11433 
   11434 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
   11435 {
   11436 	struct drm_i915_private *dev_priv =
   11437 		to_i915(plane_state->uapi.plane->dev);
   11438 	int width = drm_rect_width(&plane_state->uapi.dst);
   11439 	int height = drm_rect_height(&plane_state->uapi.dst);
   11440 
   11441 	if (!intel_cursor_size_ok(plane_state))
   11442 		return false;
   11443 
   11444 	/* Cursor width is limited to a few power-of-two sizes */
   11445 	switch (width) {
   11446 	case 256:
   11447 	case 128:
   11448 	case 64:
   11449 		break;
   11450 	default:
   11451 		return false;
   11452 	}
   11453 
   11454 	/*
   11455 	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
   11456 	 * height from 8 lines up to the cursor width, when the
   11457 	 * cursor is not rotated. Everything else requires square
   11458 	 * cursors.
   11459 	 */
   11460 	if (HAS_CUR_FBC(dev_priv) &&
   11461 	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
   11462 		if (height < 8 || height > width)
   11463 			return false;
   11464 	} else {
   11465 		if (height != width)
   11466 			return false;
   11467 	}
   11468 
   11469 	return true;
   11470 }
   11471 
/*
 * Validate an i9xx-style cursor update: common checks, dimension and
 * stride validation, and the CHV pipe C left-edge restriction. On
 * success the cursor control value is precomputed into
 * plane_state->ctl.
 *
 * Returns 0 on success or a negative error code (-EINVAL on an
 * unsupported configuration).
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The fb stride must exactly match the cursor width in bytes */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
   11528 
/*
 * Program the per-pipe i9xx-style hardware cursor from @plane_state,
 * or disable it when @plane_state is NULL or not visible (all
 * registers get 0). Platforms with CUR_FBC_CTL also get the cursor
 * height programmed there for non-square cursors.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursor: program the real height via CUR_FBC_CTL */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache programmed values to avoid redundant full reprogramming */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; the CURBASE write arms the update */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
   11596 
/* Turn off the i9xx-style cursor by programming an all-zero cursor state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
   11602 
   11603 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
   11604 				     enum pipe *pipe)
   11605 {
   11606 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   11607 	enum intel_display_power_domain power_domain;
   11608 	intel_wakeref_t wakeref;
   11609 	bool ret;
   11610 	u32 val;
   11611 
   11612 	/*
   11613 	 * Not 100% correct for planes that can move between pipes,
   11614 	 * but that's only the case for gen2-3 which don't have any
   11615 	 * display power wells.
   11616 	 */
   11617 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
   11618 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
   11619 	if (!wakeref)
   11620 		return false;
   11621 
   11622 	val = I915_READ(CURCNTR(plane->pipe));
   11623 
   11624 	ret = val & MCURSOR_MODE;
   11625 
   11626 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
   11627 		*pipe = plane->pipe;
   11628 	else
   11629 		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
   11630 			MCURSOR_PIPE_SELECT_SHIFT;
   11631 
   11632 	intel_display_power_put(dev_priv, power_domain, wakeref);
   11633 
   11634 	return ret;
   11635 }
   11636 
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection
 * (see intel_get_load_detect_pipe() below).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
   11642 
   11643 struct drm_framebuffer *
   11644 intel_framebuffer_create(struct drm_i915_gem_object *obj,
   11645 			 struct drm_mode_fb_cmd2 *mode_cmd)
   11646 {
   11647 	struct intel_framebuffer *intel_fb;
   11648 	int ret;
   11649 
   11650 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
   11651 	if (!intel_fb)
   11652 		return ERR_PTR(-ENOMEM);
   11653 
   11654 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
   11655 	if (ret)
   11656 		goto err;
   11657 
   11658 	return &intel_fb->base;
   11659 
   11660 err:
   11661 	kfree(intel_fb);
   11662 	return ERR_PTR(ret);
   11663 }
   11664 
/*
 * Add updates to @state that detach (disable) every plane currently
 * attached to @crtc: each matching plane gets its crtc and fb cleared.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	/* Pull all of the crtc's planes into @state first */
	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
   11689 
/*
 * Force-enable a pipe driving @connector with a fixed 640x480 mode so
 * the connector's load-detection logic can run. The pre-existing state
 * is duplicated into @old->restore_state for later restoration by
 * intel_release_load_detect_pipe().
 *
 * NOTE(review): despite the int return type, this returns true on
 * success, false on failure, and -EDEADLK when the caller must back
 * off and retry its modeset locking.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to apply the load-detect mode, one to restore afterwards */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the connector/crtc/plane state we are about to clobber */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK means the caller must drop locks and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
   11844 
   11845 void intel_release_load_detect_pipe(struct drm_connector *connector,
   11846 				    struct intel_load_detect_pipe *old,
   11847 				    struct drm_modeset_acquire_ctx *ctx)
   11848 {
   11849 	struct intel_encoder *intel_encoder =
   11850 		intel_attached_encoder(to_intel_connector(connector));
   11851 	struct drm_encoder *encoder = &intel_encoder->base;
   11852 	struct drm_atomic_state *state = old->restore_state;
   11853 	int ret;
   11854 
   11855 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
   11856 		      connector->base.id, connector->name,
   11857 		      encoder->base.id, encoder->name);
   11858 
   11859 	if (!state)
   11860 		return;
   11861 
   11862 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
   11863 	if (ret)
   11864 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
   11865 	drm_atomic_state_put(state);
   11866 }
   11867 
   11868 static int i9xx_pll_refclk(struct drm_device *dev,
   11869 			   const struct intel_crtc_state *pipe_config)
   11870 {
   11871 	struct drm_i915_private *dev_priv = to_i915(dev);
   11872 	u32 dpll = pipe_config->dpll_hw_state.dpll;
   11873 
   11874 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
   11875 		return dev_priv->vbt.lvds_ssc_freq;
   11876 	else if (HAS_PCH_SPLIT(dev_priv))
   11877 		return 120000;
   11878 	else if (!IS_GEN(dev_priv, 2))
   11879 		return 96000;
   11880 	else
   11881 		return 48000;
   11882 }
   11883 
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values saved in
 * pipe_config->dpll_hw_state into m/n/p divisors and stores the
 * resulting clock (including pixel_multiplier) in
 * pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP register that is actually selected by the DPLL */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored as a one-hot bitfield; ffs() recovers the divisor */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: divisors are encoded differently and may depend on LVDS */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
   11973 
   11974 int intel_dotclock_calculate(int link_freq,
   11975 			     const struct intel_link_m_n *m_n)
   11976 {
   11977 	/*
   11978 	 * The calculation for the data clock is:
   11979 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
   11980 	 * But we want to avoid losing precison if possible, so:
   11981 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
   11982 	 *
   11983 	 * and the link clock is simpler:
   11984 	 * link_clock = (m * link_clock) / n
   11985 	 */
   11986 
   11987 	if (!m_n->link_n)
   11988 		return 0;
   11989 
   11990 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
   11991 }
   11992 
/*
 * Read out the pipe clock on PCH (ILK+) platforms: port_clock comes
 * from the DPLL readout, and a dotclock estimate is derived from the
 * FDI m/n configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
   12010 
   12011 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
   12012 				   struct intel_crtc *crtc)
   12013 {
   12014 	memset(crtc_state, 0, sizeof(*crtc_state));
   12015 
   12016 	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
   12017 
   12018 	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
   12019 	crtc_state->master_transcoder = INVALID_TRANSCODER;
   12020 	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
   12021 	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
   12022 	crtc_state->scaler_state.scaler_id = -1;
   12023 	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
   12024 }
   12025 
   12026 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
   12027 {
   12028 	struct intel_crtc_state *crtc_state;
   12029 
   12030 	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
   12031 
   12032 	if (crtc_state)
   12033 		intel_crtc_state_reset(crtc_state, crtc);
   12034 
   12035 	return crtc_state;
   12036 }
   12037 
   12038 /* Returns the currently programmed mode of the given encoder. */
   12039 struct drm_display_mode *
   12040 intel_encoder_current_mode(struct intel_encoder *encoder)
   12041 {
   12042 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
   12043 	struct intel_crtc_state *crtc_state;
   12044 	struct drm_display_mode *mode;
   12045 	struct intel_crtc *crtc;
   12046 	enum pipe pipe;
   12047 
   12048 	if (!encoder->get_hw_state(encoder, &pipe))
   12049 		return NULL;
   12050 
   12051 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   12052 
   12053 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
   12054 	if (!mode)
   12055 		return NULL;
   12056 
   12057 	crtc_state = intel_crtc_state_alloc(crtc);
   12058 	if (!crtc_state) {
   12059 		kfree(mode);
   12060 		return NULL;
   12061 	}
   12062 
   12063 	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
   12064 		kfree(crtc_state);
   12065 		kfree(mode);
   12066 		return NULL;
   12067 	}
   12068 
   12069 	encoder->get_config(encoder, crtc_state);
   12070 
   12071 	intel_mode_from_pipe_config(mode, crtc_state);
   12072 
   12073 	kfree(crtc_state);
   12074 
   12075 	return mode;
   12076 }
   12077 
/* Tear down the base drm_crtc and free the containing intel_crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
   12085 
   12086 /**
   12087  * intel_wm_need_update - Check whether watermarks need updating
   12088  * @cur: current plane state
   12089  * @new: new plane state
   12090  *
   12091  * Check current plane state versus the new one to determine whether
   12092  * watermarks need to be recalculated.
   12093  *
   12094  * Returns true or false.
   12095  */
   12096 static bool intel_wm_need_update(const struct intel_plane_state *cur,
   12097 				 struct intel_plane_state *new)
   12098 {
   12099 	/* Update watermarks on tiling or size changes. */
   12100 	if (new->uapi.visible != cur->uapi.visible)
   12101 		return true;
   12102 
   12103 	if (!cur->hw.fb || !new->hw.fb)
   12104 		return false;
   12105 
   12106 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
   12107 	    cur->hw.rotation != new->hw.rotation ||
   12108 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
   12109 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
   12110 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
   12111 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
   12112 		return true;
   12113 
   12114 	return false;
   12115 }
   12116 
   12117 static bool needs_scaling(const struct intel_plane_state *state)
   12118 {
   12119 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
   12120 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
   12121 	int dst_w = drm_rect_width(&state->uapi.dst);
   12122 	int dst_h = drm_rect_height(&state->uapi.dst);
   12123 
   12124 	return (src_w != dst_w || src_h != dst_h);
   12125 }
   12126 
/*
 * intel_plane_atomic_calc_changes - compute derived CRTC state for a
 * plane update
 * @old_crtc_state: CRTC state from the previous commit
 * @crtc_state: new CRTC state being computed (updated in place)
 * @old_plane_state: plane state from the previous commit
 * @plane_state: new plane state being checked
 *
 * Works out the plane's visibility transition (turn on/off) and sets
 * the watermark/cxsr/frontbuffer related flags in @crtc_state
 * accordingly. On gen9+ also runs skl_update_scaler_plane() for
 * non-cursor planes.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing more to do */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
   12249 
   12250 static bool encoders_cloneable(const struct intel_encoder *a,
   12251 			       const struct intel_encoder *b)
   12252 {
   12253 	/* masks could be asymmetric, so check both ways */
   12254 	return a == b || (a->cloneable & (1 << b->type) &&
   12255 			  b->cloneable & (1 << a->type));
   12256 }
   12257 
   12258 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
   12259 					 struct intel_crtc *crtc,
   12260 					 struct intel_encoder *encoder)
   12261 {
   12262 	struct intel_encoder *source_encoder;
   12263 	struct drm_connector *connector;
   12264 	struct drm_connector_state *connector_state;
   12265 	int i;
   12266 
   12267 	for_each_new_connector_in_state(state, connector, connector_state, i) {
   12268 		if (connector_state->crtc != &crtc->base)
   12269 			continue;
   12270 
   12271 		source_encoder =
   12272 			to_intel_encoder(connector_state->best_encoder);
   12273 		if (!encoders_cloneable(encoder, source_encoder))
   12274 			return false;
   12275 	}
   12276 
   12277 	return true;
   12278 }
   12279 
   12280 static int icl_add_linked_planes(struct intel_atomic_state *state)
   12281 {
   12282 	struct intel_plane *plane, *linked;
   12283 	struct intel_plane_state *plane_state, *linked_plane_state;
   12284 	int i;
   12285 
   12286 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   12287 		linked = plane_state->planar_linked_plane;
   12288 
   12289 		if (!linked)
   12290 			continue;
   12291 
   12292 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
   12293 		if (IS_ERR(linked_plane_state))
   12294 			return PTR_ERR(linked_plane_state);
   12295 
   12296 		WARN_ON(linked_plane_state->planar_linked_plane != plane);
   12297 		WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
   12298 	}
   12299 
   12300 	return 0;
   12301 }
   12302 
/*
 * On gen11+, set up master/slave links between planar (NV12) planes and
 * auxiliary Y planes on this CRTC: first tear down all existing links,
 * then pick a free Y-capable plane for each plane in
 * crtc_state->nv12_planes and copy the master's parameters to it.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or an
 * error from intel_atomic_get_plane_state().
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+ hardware. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			/* Drop invisible slaves from the active plane mask. */
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Nothing to link when no plane uses a planar format. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a currently inactive Y-capable plane on this pipe. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* Map the chosen Y plane to its CUS plane-select bit. */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
   12394 
   12395 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
   12396 {
   12397 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   12398 	struct intel_atomic_state *state =
   12399 		to_intel_atomic_state(new_crtc_state->uapi.state);
   12400 	const struct intel_crtc_state *old_crtc_state =
   12401 		intel_atomic_get_old_crtc_state(state, crtc);
   12402 
   12403 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
   12404 }
   12405 
   12406 static bool
   12407 intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
   12408 {
   12409 	struct drm_crtc *crtc = crtc_state->uapi.crtc;
   12410 	struct drm_atomic_state *state = crtc_state->uapi.state;
   12411 	struct drm_connector *connector;
   12412 	struct drm_connector_state *connector_state;
   12413 	int i;
   12414 
   12415 	for_each_new_connector_in_state(state, connector, connector_state, i) {
   12416 		if (connector_state->crtc != crtc)
   12417 			continue;
   12418 		if (connector->has_tile &&
   12419 		    connector->tile_h_loc == connector->num_h_tile - 1 &&
   12420 		    connector->tile_v_loc == connector->num_v_tile - 1)
   12421 			return true;
   12422 	}
   12423 
   12424 	return false;
   12425 }
   12426 
   12427 static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
   12428 {
   12429 	crtc_state->master_transcoder = INVALID_TRANSCODER;
   12430 	crtc_state->sync_mode_slaves_mask = 0;
   12431 }
   12432 
/*
 * For a gen11+ DP tiled display, make this (slave) CRTC point at the
 * transcoder of the CRTC driving the master tile, and record this CRTC's
 * transcoder in the master's slave bitmask.
 *
 * Returns 0 on success (including the cases where port sync does not
 * apply or this CRTC itself is the master), -EINVAL when no master CRTC
 * can be found, or an error from the atomic state helpers.
 */
static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
					    struct intel_crtc_state *crtc_state,
					    int num_tiled_conns)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;

	/* Port sync is only handled here for gen11+ DP outputs. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 * If all tiles not present do not make master slave assignments.
	 */
	if (!connector->has_tile ||
	    crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
	    crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
	    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
		reset_port_sync_mode_state(crtc_state);
		return 0;
	}
	/* Last Horizontal and last vertical tile connector is a master
	 * Master's crtc state is already populated in slave for port sync
	 */
	if (connector->tile_h_loc == connector->num_h_tile - 1 &&
	    connector->tile_v_loc == connector->num_v_tile - 1)
		return 0;

	/* Loop through all connectors and configure the Slave crtc_state
	 * to point to the correct master.
	 */
	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(master_connector, &conn_iter) {
		struct drm_connector_state *master_conn_state = NULL;

		/* Only consider the master tile of our own tile group. */
		if (!(master_connector->has_tile &&
		      master_connector->tile_group->id == connector->tile_group->id))
			continue;
		if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
		    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
			continue;

		master_conn_state = drm_atomic_get_connector_state(&state->base,
								   master_connector);
		if (IS_ERR(master_conn_state)) {
			/* Must end the iterator before bailing out. */
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(master_conn_state);
		}
		if (master_conn_state->crtc) {
			master_crtc = master_conn_state->crtc;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!master_crtc) {
		DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
			      crtc->base.id);
		return -EINVAL;
	}

	master_crtc_state = drm_atomic_get_crtc_state(&state->base,
						      master_crtc);
	if (IS_ERR(master_crtc_state))
		return PTR_ERR(master_crtc_state);

	/* Link slave -> master transcoder and master -> slave bitmask. */
	master_pipe_config = to_intel_crtc_state(master_crtc_state);
	crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
	master_pipe_config->sync_mode_slaves_mask |=
		BIT(crtc_state->cpu_transcoder);
	DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
		      transcoder_name(crtc_state->master_transcoder),
		      crtc->base.id,
		      master_pipe_config->sync_mode_slaves_mask);

	return 0;
}
   12523 
/*
 * Per-CRTC atomic check: compute clocks, color management, watermarks,
 * scalers and IPS state for the new CRTC state. Returns 0 on success or
 * a negative error code from any of the sub-checks.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except G4X): disabling the pipe needs a post wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A full modeset with the pipe enabled must recompute the clock. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	/* ret doubles as the final return value for the gen9+ path below. */
	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
   12597 
/*
 * Rebuild every connector's atomic state (crtc, best_encoder and the
 * connector reference held by the state) from the legacy
 * connector->encoder->crtc pointers.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference the old state's crtc binding held. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc again: take a new reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
   12622 
   12623 static int
   12624 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
   12625 		      struct intel_crtc_state *pipe_config)
   12626 {
   12627 	struct drm_connector *connector = conn_state->connector;
   12628 	const struct drm_display_info *info = &connector->display_info;
   12629 	int bpp;
   12630 
   12631 	switch (conn_state->max_bpc) {
   12632 	case 6 ... 7:
   12633 		bpp = 6 * 3;
   12634 		break;
   12635 	case 8 ... 9:
   12636 		bpp = 8 * 3;
   12637 		break;
   12638 	case 10 ... 11:
   12639 		bpp = 10 * 3;
   12640 		break;
   12641 	case 12:
   12642 		bpp = 12 * 3;
   12643 		break;
   12644 	default:
   12645 		return -EINVAL;
   12646 	}
   12647 
   12648 	if (bpp < pipe_config->pipe_bpp) {
   12649 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
   12650 			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
   12651 			      connector->base.id, connector->name,
   12652 			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
   12653 			      pipe_config->pipe_bpp);
   12654 
   12655 		pipe_config->pipe_bpp = bpp;
   12656 	}
   12657 
   12658 	return 0;
   12659 }
   12660 
   12661 static int
   12662 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
   12663 			  struct intel_crtc_state *pipe_config)
   12664 {
   12665 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   12666 	struct drm_atomic_state *state = pipe_config->uapi.state;
   12667 	struct drm_connector *connector;
   12668 	struct drm_connector_state *connector_state;
   12669 	int bpp, i;
   12670 
   12671 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   12672 	    IS_CHERRYVIEW(dev_priv)))
   12673 		bpp = 10*3;
   12674 	else if (INTEL_GEN(dev_priv) >= 5)
   12675 		bpp = 12*3;
   12676 	else
   12677 		bpp = 8*3;
   12678 
   12679 	pipe_config->pipe_bpp = bpp;
   12680 
   12681 	/* Clamp display bpp to connector max bpp */
   12682 	for_each_new_connector_in_state(state, connector, connector_state, i) {
   12683 		int ret;
   12684 
   12685 		if (connector_state->crtc != &crtc->base)
   12686 			continue;
   12687 
   12688 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
   12689 		if (ret)
   12690 			return ret;
   12691 	}
   12692 
   12693 	return 0;
   12694 }
   12695 
/* Log the hardware (crtc_*) timings and flags of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
   12707 
/*
 * Log a link m/n configuration (used for both "dp m_n"/"dp m2_n2" and
 * "fdi") tagged with @id and the lane count, at KMS debug level.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
   12718 
   12719 static void
   12720 intel_dump_infoframe(struct drm_i915_private *dev_priv,
   12721 		     const union hdmi_infoframe *frame)
   12722 {
   12723 	if (!drm_debug_enabled(DRM_UT_KMS))
   12724 		return;
   12725 
   12726 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
   12727 }
   12728 
/* Expands INTEL_OUTPUT_FOO into a [INTEL_OUTPUT_FOO] = "FOO" initializer. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Names of the INTEL_OUTPUT_* values, indexed by output type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
   12747 
   12748 static void snprintf_output_types(char *buf, size_t len,
   12749 				  unsigned int output_types)
   12750 {
   12751 	char *str = buf;
   12752 	int i;
   12753 
   12754 	str[0] = '\0';
   12755 
   12756 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
   12757 		int r;
   12758 
   12759 		if ((output_types & BIT(i)) == 0)
   12760 			continue;
   12761 
   12762 		r = snprintf(str, len, "%s%s",
   12763 			     str != buf ? "," : "", output_type_str[i]);
   12764 		if (r >= len)
   12765 			break;
   12766 		str += r;
   12767 		len -= r;
   12768 
   12769 		output_types &= ~BIT(i);
   12770 	}
   12771 
   12772 	WARN_ON_ONCE(output_types != 0);
   12773 }
   12774 
/* Human readable names of the intel_output_format values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
   12781 
   12782 static const char *output_formats(enum intel_output_format format)
   12783 {
   12784 	if (format >= ARRAY_SIZE(output_format_str))
   12785 		format = INTEL_OUTPUT_FORMAT_INVALID;
   12786 	return output_format_str[format];
   12787 }
   12788 
/*
 * Log a plane's framebuffer, format, visibility, rotation, scaler and
 * (when visible) src/dst rectangles at KMS debug level.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* A plane without an fb has nothing else worth dumping. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
   12814 
/*
 * Dump a CRTC's full pipe configuration (outputs, timings, m/n values,
 * infoframes, pfit, dpll and color state) plus the state of every plane
 * on its pipe, all at KMS debug level. @context tags the log lines;
 * @state may be NULL, in which case the plane dump is skipped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has no config worth dumping, only planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump only the infoframes that are actually enabled. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
		              pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a panel fitter; PCH platforms a pipe pfit. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

	DRM_DEBUG_KMS("MST master transcoder: %s\n",
		      transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
   12928 
/*
 * Verify that no digital port is used by more than one encoder in the
 * new state, and that MST and SST/HDMI are not mixed on the same port.
 * Returns true when the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
   13000 
/*
 * Sync uapi -> hw state for a non-modeset update: only the color
 * management blobs are copied.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
   13006 
   13007 static void
   13008 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
   13009 {
   13010 	crtc_state->hw.enable = crtc_state->uapi.enable;
   13011 	crtc_state->hw.active = crtc_state->uapi.active;
   13012 	crtc_state->hw.mode = crtc_state->uapi.mode;
   13013 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
   13014 	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
   13015 }
   13016 
/*
 * Sync the hw state back into the uapi state (e.g. after readout):
 * enable/active flags, both modes and the color management blobs.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Also updates uapi.mode; failure here would indicate a driver bug. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
   13033 
/*
 * Reset @crtc_state to a freshly-allocated (cleared) state while
 * preserving the handful of fields that must survive a modeset
 * computation: the uapi state, scaler state, DPLL selection and hw
 * state, CRC enable, platform watermarks where needed, and the port
 * sync slave mask for master CRTCs.
 *
 * Returns 0 on success, -ENOMEM if the temporary state allocation
 * fails (in which case @crtc_state is left untouched).
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* gmch-style platforms compute watermarks up front; keep them. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call. For all other CRTCs reset the port sync variables
	 * crtc_state->master_transcoder needs to be set to INVALID
	 */
	reset_port_sync_mode_state(saved_state);
	if (intel_atomic_is_master_connector(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Bulk-overwrite crtc_state with the cleared-but-patched copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
   13080 
/*
 * Compute the full pipe configuration for a modeset: sanitize the sync
 * polarity flags, derive the baseline pipe bpp and the pipe source
 * size, collect the output types, then give every encoder and finally
 * the crtc a chance to adjust (or reject) the configuration, retrying
 * once from encoder_retry if the crtc asks for it.
 *
 * Returns 0 on success or a negative error code (-EINVAL on invalid
 * cloning or a retry loop, -EDEADLK propagated from locking, or
 * whatever the encoder/crtc hooks return).
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i, tile_group_id = -1, num_tiled_conns = 0;
	bool retry = true;

	/* Default: transcoder number matches the pipe number. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-adjustment bpp for the debug print below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Get tile_group_id of tiled connector */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == crtc &&
		    connector->has_tile) {
			tile_group_id = connector->tile_group->id;
			break;
		}
	}

	/* Get total number of tiled connectors in state that belong to
	 * this tile group.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector->has_tile &&
		    connector->tile_group->id == tile_group_id)
			num_tiled_conns++;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
						       num_tiled_conns);
		if (ret) {
			DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
				      ret);
			return ret;
		}

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is routine (lock contention); don't log it. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* A positive RETRY means try again with reduced bandwidth — once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
   13245 
/*
 * Return true when two clock frequencies are close enough to be
 * treated as equal: either identical, or within roughly 5% of their
 * combined value. A zero clock never matches a non-zero one.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	/* Identical clocks trivially match. */
	if (clock1 == clock2)
		return true;

	/* Zero (disabled) clocks only match other zero clocks. */
	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/*
	 * Accept when (delta + sum) * 100 / sum < 105, i.e. the
	 * difference is below ~5% of the combined clock.
	 */
	return (delta + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}
   13263 
   13264 static bool
   13265 intel_compare_m_n(unsigned int m, unsigned int n,
   13266 		  unsigned int m2, unsigned int n2,
   13267 		  bool exact)
   13268 {
   13269 	if (m == m2 && n == n2)
   13270 		return true;
   13271 
   13272 	if (exact || !m || !n || !m2 || !n2)
   13273 		return false;
   13274 
   13275 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
   13276 
   13277 	if (n > n2) {
   13278 		while (n > n2) {
   13279 			m2 <<= 1;
   13280 			n2 <<= 1;
   13281 		}
   13282 	} else if (n < n2) {
   13283 		while (n < n2) {
   13284 			m <<= 1;
   13285 			n <<= 1;
   13286 		}
   13287 	}
   13288 
   13289 	if (n != n2)
   13290 		return false;
   13291 
   13292 	return intel_fuzzy_clock_check(m, m2);
   13293 }
   13294 
   13295 static bool
   13296 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
   13297 		       const struct intel_link_m_n *m2_n2,
   13298 		       bool exact)
   13299 {
   13300 	return m_n->tu == m2_n2->tu &&
   13301 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
   13302 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
   13303 		intel_compare_m_n(m_n->link_m, m_n->link_n,
   13304 				  m2_n2->link_m, m2_n2->link_n, exact);
   13305 }
   13306 
   13307 static bool
   13308 intel_compare_infoframe(const union hdmi_infoframe *a,
   13309 			const union hdmi_infoframe *b)
   13310 {
   13311 	return memcmp(a, b, sizeof(*a)) == 0;
   13312 }
   13313 
/*
 * Log an infoframe mismatch between the expected (@a) and found (@b)
 * state. Fastset mismatches are only interesting for debugging, so
 * they go to the KMS debug log (and are skipped entirely when KMS
 * debugging is off); real mismatches are reported as errors.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		/* Avoid the expensive dumps when nobody is listening. */
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
		DRM_DEBUG_KMS("expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		DRM_DEBUG_KMS("found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		DRM_ERROR("mismatch in %s infoframe\n", name);
		DRM_ERROR("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		DRM_ERROR("found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
   13337 
/*
 * Log a pipe-config field mismatch for @crtc. @name is the field name
 * and @format/... describe the expected vs. found values (printed via
 * the kernel's %pV / struct va_format mechanism). Fastset mismatches
 * are debug-level; real mismatches are errors.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			      crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
			  crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
   13358 
   13359 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
   13360 {
   13361 	if (i915_modparams.fastboot != -1)
   13362 		return i915_modparams.fastboot;
   13363 
   13364 	/* Enable fastboot by default on Skylake and newer */
   13365 	if (INTEL_GEN(dev_priv) >= 9)
   13366 		return true;
   13367 
   13368 	/* Enable fastboot by default on VLV and CHV */
   13369 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   13370 		return true;
   13371 
   13372 	/* Disabled by default on all others */
   13373 	return false;
   13374 }
   13375 
/*
 * Compare two crtc states field by field using the PIPE_CONF_CHECK_*
 * helper macros defined below: @current_config is the sw state,
 * @pipe_config the freshly computed (or read back) state. Every
 * mismatch is logged via pipe_config_mismatch() — debug-level when
 * @fastset, error-level otherwise — and makes the function return
 * false; checking continues so all mismatches get reported.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/* True when fastset is clearing the boot-inherited mode flag. */
	bool fixup_inherited = fastset &&
		(current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
		ret = false;
	}

/* Compare a field, reporting mismatches in hex. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a field, reporting mismatches in decimal. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a boolean field, reporting mismatches as yes/no. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare a pointer field (identity comparison). */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a link M/N struct (exact only when !fastset). */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits of a field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare a clock field with the ~5% fuzzy tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Byte-compare an infoframe, dumping both on mismatch. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* Compare a gamma mode and, when it matches, the LUT contents too. */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	/* The actual field-by-field comparison starts here. */
	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_I(dc3co_exitline);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	/* These fields are allowed to change across a fastset. */
	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);

	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);

	PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);

	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_I(mst_master_transcoder);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
   13732 
/*
 * Cross-check the dotclock implied by the FDI link configuration
 * against the encoder's adjusted-mode dotclock and WARN (fuzzy ~5%
 * tolerance) if they disagree. Only meaningful for PCH-encoder pipes.
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}
   13750 
   13751 static void verify_wm_state(struct intel_crtc *crtc,
   13752 			    struct intel_crtc_state *new_crtc_state)
   13753 {
   13754 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   13755 	struct skl_hw_state {
   13756 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
   13757 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
   13758 		struct skl_ddb_allocation ddb;
   13759 		struct skl_pipe_wm wm;
   13760 	} *hw;
   13761 	struct skl_ddb_allocation *sw_ddb;
   13762 	struct skl_pipe_wm *sw_wm;
   13763 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
   13764 	const enum pipe pipe = crtc->pipe;
   13765 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
   13766 
   13767 	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
   13768 		return;
   13769 
   13770 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
   13771 	if (!hw)
   13772 		return;
   13773 
   13774 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
   13775 	sw_wm = &new_crtc_state->wm.skl.optimal;
   13776 
   13777 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
   13778 
   13779 	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
   13780 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
   13781 
   13782 	if (INTEL_GEN(dev_priv) >= 11 &&
   13783 	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
   13784 		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
   13785 			  sw_ddb->enabled_slices,
   13786 			  hw->ddb.enabled_slices);
   13787 
   13788 	/* planes */
   13789 	for_each_universal_plane(dev_priv, pipe, plane) {
   13790 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
   13791 
   13792 		hw_plane_wm = &hw->wm.planes[plane];
   13793 		sw_plane_wm = &sw_wm->planes[plane];
   13794 
   13795 		/* Watermarks */
   13796 		for (level = 0; level <= max_level; level++) {
   13797 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
   13798 						&sw_plane_wm->wm[level]))
   13799 				continue;
   13800 
   13801 			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13802 				  pipe_name(pipe), plane + 1, level,
   13803 				  sw_plane_wm->wm[level].plane_en,
   13804 				  sw_plane_wm->wm[level].plane_res_b,
   13805 				  sw_plane_wm->wm[level].plane_res_l,
   13806 				  hw_plane_wm->wm[level].plane_en,
   13807 				  hw_plane_wm->wm[level].plane_res_b,
   13808 				  hw_plane_wm->wm[level].plane_res_l);
   13809 		}
   13810 
   13811 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
   13812 					 &sw_plane_wm->trans_wm)) {
   13813 			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13814 				  pipe_name(pipe), plane + 1,
   13815 				  sw_plane_wm->trans_wm.plane_en,
   13816 				  sw_plane_wm->trans_wm.plane_res_b,
   13817 				  sw_plane_wm->trans_wm.plane_res_l,
   13818 				  hw_plane_wm->trans_wm.plane_en,
   13819 				  hw_plane_wm->trans_wm.plane_res_b,
   13820 				  hw_plane_wm->trans_wm.plane_res_l);
   13821 		}
   13822 
   13823 		/* DDB */
   13824 		hw_ddb_entry = &hw->ddb_y[plane];
   13825 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
   13826 
   13827 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
   13828 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
   13829 				  pipe_name(pipe), plane + 1,
   13830 				  sw_ddb_entry->start, sw_ddb_entry->end,
   13831 				  hw_ddb_entry->start, hw_ddb_entry->end);
   13832 		}
   13833 	}
   13834 
   13835 	/*
   13836 	 * cursor
   13837 	 * If the cursor plane isn't active, we may not have updated it's ddb
   13838 	 * allocation. In that case since the ddb allocation will be updated
   13839 	 * once the plane becomes visible, we can skip this check
   13840 	 */
   13841 	if (1) {
   13842 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
   13843 
   13844 		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
   13845 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
   13846 
   13847 		/* Watermarks */
   13848 		for (level = 0; level <= max_level; level++) {
   13849 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
   13850 						&sw_plane_wm->wm[level]))
   13851 				continue;
   13852 
   13853 			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13854 				  pipe_name(pipe), level,
   13855 				  sw_plane_wm->wm[level].plane_en,
   13856 				  sw_plane_wm->wm[level].plane_res_b,
   13857 				  sw_plane_wm->wm[level].plane_res_l,
   13858 				  hw_plane_wm->wm[level].plane_en,
   13859 				  hw_plane_wm->wm[level].plane_res_b,
   13860 				  hw_plane_wm->wm[level].plane_res_l);
   13861 		}
   13862 
   13863 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
   13864 					 &sw_plane_wm->trans_wm)) {
   13865 			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
   13866 				  pipe_name(pipe),
   13867 				  sw_plane_wm->trans_wm.plane_en,
   13868 				  sw_plane_wm->trans_wm.plane_res_b,
   13869 				  sw_plane_wm->trans_wm.plane_res_l,
   13870 				  hw_plane_wm->trans_wm.plane_en,
   13871 				  hw_plane_wm->trans_wm.plane_res_b,
   13872 				  hw_plane_wm->trans_wm.plane_res_l);
   13873 		}
   13874 
   13875 		/* DDB */
   13876 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
   13877 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
   13878 
   13879 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
   13880 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
   13881 				  pipe_name(pipe),
   13882 				  sw_ddb_entry->start, sw_ddb_entry->end,
   13883 				  hw_ddb_entry->start, hw_ddb_entry->end);
   13884 		}
   13885 	}
   13886 
   13887 	kfree(hw);
   13888 }
   13889 
/*
 * Cross-check the connectors routed to @crtc in the new atomic state.
 *
 * @crtc may be NULL (see intel_modeset_verify_disabled()), in which case
 * only connectors with no crtc pass the filter below.
 * NOTE(review): the `&crtc->base` comparison with crtc == NULL assumes
 * `base` is the first member of struct intel_crtc — confirm.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		/* Legacy (non-atomic) encoder pointer, checked against atomic below. */
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/* Only connectors driven by @crtc are of interest here. */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
   13914 
/*
 * Verify every encoder's software state against the connectors in the
 * atomic state and against the hardware: an encoder with no crtc must
 * also be disabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector referenced this encoder in the old
		 * or new state, so the checks below apply to it.
		 * enabled: a connector still uses it in the new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder untouched by this state; nothing to cross-check. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* A detached encoder must be off in hardware too. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
   13963 
/*
 * Read the full pipe configuration back from hardware and compare it
 * against the software state computed for this commit, warning (and
 * dumping both states) on any mismatch.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	bool active;

	/*
	 * The old crtc state is no longer needed at this point, so recycle
	 * it as scratch storage (pipe_config) for the hardware readout;
	 * only the ->uapi.state backpointer is preserved across the reset.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Every encoder on this crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's view of the hw state into pipe_config. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* An inactive pipe has no further state worth comparing. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
   14030 
/*
 * Assert each plane's enable state is consistent: a plane must be
 * enabled iff it is visible or is a planar slave plane (the name
 * suggests the UV plane of a two-plane format — confirm against
 * icl_check_nv12_planes()).
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}
   14043 
/*
 * Verify one shared DPLL's software tracking against its hardware state.
 *
 * With @crtc == NULL only the PLL-global invariants are checked (used
 * for PLLs expected to be idle); otherwise @crtc's membership in the
 * PLL's active and enabled-crtcs masks is verified as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs may legitimately be on with no active users. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Active users must be a subset of the tracked references. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* The crtc must be in the active mask iff its pipe is active. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The cached sw copy of the hw state must match the readout. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
   14098 
   14099 static void
   14100 verify_shared_dpll_state(struct intel_crtc *crtc,
   14101 			 struct intel_crtc_state *old_crtc_state,
   14102 			 struct intel_crtc_state *new_crtc_state)
   14103 {
   14104 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   14105 
   14106 	if (new_crtc_state->shared_dpll)
   14107 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
   14108 
   14109 	if (old_crtc_state->shared_dpll &&
   14110 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
   14111 		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
   14112 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
   14113 
   14114 		I915_STATE_WARN(pll->active_mask & crtc_mask,
   14115 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
   14116 				pipe_name(drm_crtc_index(&crtc->base)));
   14117 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
   14118 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
   14119 				pipe_name(drm_crtc_index(&crtc->base)));
   14120 	}
   14121 }
   14122 
   14123 static void
   14124 intel_modeset_verify_crtc(struct intel_crtc *crtc,
   14125 			  struct intel_atomic_state *state,
   14126 			  struct intel_crtc_state *old_crtc_state,
   14127 			  struct intel_crtc_state *new_crtc_state)
   14128 {
   14129 	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
   14130 		return;
   14131 
   14132 	verify_wm_state(crtc, new_crtc_state);
   14133 	verify_connector_state(state, crtc);
   14134 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
   14135 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
   14136 }
   14137 
   14138 static void
   14139 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
   14140 {
   14141 	int i;
   14142 
   14143 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
   14144 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
   14145 }
   14146 
/*
 * Verify the state that is not tied to any enabled crtc: all encoders,
 * connectors with no crtc, and the global DPLL invariants.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
   14155 
/*
 * Recompute the crtc's vblank timestamping constants and the platform-
 * dependent scanline counter offset for the given adjusted mode.  See
 * the long comment below for why the offset differs per platform.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* Interlaced modes halve the effective vertical total. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
   14208 
   14209 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
   14210 {
   14211 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   14212 	struct intel_crtc_state *new_crtc_state;
   14213 	struct intel_crtc *crtc;
   14214 	int i;
   14215 
   14216 	if (!dev_priv->display.crtc_compute_clock)
   14217 		return;
   14218 
   14219 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   14220 		if (!needs_modeset(new_crtc_state))
   14221 			continue;
   14222 
   14223 		intel_release_shared_dplls(state, crtc);
   14224 	}
   14225 }
   14226 
   14227 /*
   14228  * This implements the workaround described in the "notes" section of the mode
   14229  * set sequence documentation. When going from no pipes or single pipe to
   14230  * multiple pipes, and planes are enabled after the pipe, we need to wait at
   14231  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
   14232  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		/* Track the first two pipes being enabled by this modeset. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/*
		 * Every crtc is pulled into the state here (presumably also
		 * serializing against other commits — confirm), and its
		 * workaround pipe is cleared by default.
		 */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * A newly enabled pipe waits on the already-running pipe; if none
	 * is running, the second new pipe waits on the first new pipe.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
   14287 
/*
 * Global (not per-crtc) checks for a state containing at least one full
 * modeset: refresh the active-pipes mask, take the global-state lock when
 * that mask changes, recompute cdclk, drop stale DPLL references, and on
 * HSW apply the mode-set planes workaround.  Returns 0 or a negative
 * error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	/* Seed the state-local copies from the current device state. */
	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* Fold each crtc's new active state into the pipe masks. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* A changed set of active pipes affects device-global state. */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}
   14332 
   14333 /*
   14334  * Handle calculation of various watermark data at the end of the atomic check
   14335  * phase.  The code here should be run after the per-crtc and per-plane 'check'
   14336  * handlers to ensure that all derived state has been updated.
   14337  */
   14338 static int calc_watermark_data(struct intel_atomic_state *state)
   14339 {
   14340 	struct drm_device *dev = state->base.dev;
   14341 	struct drm_i915_private *dev_priv = to_i915(dev);
   14342 
   14343 	/* Is there platform-specific watermark information to calculate? */
   14344 	if (dev_priv->display.compute_global_watermarks)
   14345 		return dev_priv->display.compute_global_watermarks(state);
   14346 
   14347 	return 0;
   14348 }
   14349 
   14350 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
   14351 				     struct intel_crtc_state *new_crtc_state)
   14352 {
   14353 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
   14354 		return;
   14355 
   14356 	new_crtc_state->uapi.mode_changed = false;
   14357 	new_crtc_state->update_pipe = true;
   14358 }
   14359 
/*
 * Carry selected link parameters over from the current state when doing
 * a fastset, so the hardware keeps running with the values it was
 * programmed with rather than freshly computed (slightly different) ones.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
   14376 
   14377 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
   14378 					  struct intel_crtc *crtc,
   14379 					  u8 plane_ids_mask)
   14380 {
   14381 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   14382 	struct intel_plane *plane;
   14383 
   14384 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   14385 		struct intel_plane_state *plane_state;
   14386 
   14387 		if ((plane_ids_mask & BIT(plane->id)) == 0)
   14388 			continue;
   14389 
   14390 		plane_state = intel_atomic_get_plane_state(state, plane);
   14391 		if (IS_ERR(plane_state))
   14392 			return PTR_ERR(plane_state);
   14393 	}
   14394 
   14395 	return 0;
   14396 }
   14397 
   14398 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
   14399 {
   14400 	/* See {hsw,vlv,ivb}_plane_ratio() */
   14401 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
   14402 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   14403 		IS_IVYBRIDGE(dev_priv);
   14404 }
   14405 
/*
 * Run the per-plane atomic checks and, on platforms where the number of
 * active planes influences the planes' minimum cdclk, pull the affected
 * planes into the state before computing each plane's minimum cdclk.
 * *need_modeset is OR-ed with true if any plane's min cdclk requires a
 * cdclk change.  Returns 0 or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane does not participate in the plane-count ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Unchanged plane count: min cdclk per plane is unaffected. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
   14466 
   14467 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
   14468 {
   14469 	struct intel_crtc_state *crtc_state;
   14470 	struct intel_crtc *crtc;
   14471 	int i;
   14472 
   14473 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   14474 		int ret = intel_crtc_atomic_check(state, crtc);
   14475 		if (ret) {
   14476 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
   14477 					 crtc->base.base.id, crtc->base.name);
   14478 			return ret;
   14479 		}
   14480 	}
   14481 
   14482 	return 0;
   14483 }
   14484 
   14485 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
   14486 					       u8 transcoders)
   14487 {
   14488 	const struct intel_crtc_state *new_crtc_state;
   14489 	struct intel_crtc *crtc;
   14490 	int i;
   14491 
   14492 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   14493 		if (new_crtc_state->hw.enable &&
   14494 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
   14495 		    needs_modeset(new_crtc_state))
   14496 			return true;
   14497 	}
   14498 
   14499 	return false;
   14500 }
   14501 
/*
 * Add every connector of tile group @tile_grp_id — and the crtc each is
 * currently routed to — to the atomic state, forcing a modeset on those
 * crtcs so all tiles of one display are modeset together.  Returns 0 or
 * a negative error code.
 */
static int
intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_grp_id)
			continue;
		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret =  PTR_ERR(conn_state);
			break;
		}

		/* An unrouted tile connector has no crtc to modeset. */
		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(&state->base,
						       conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}
		/* Force a full modeset on this tile's crtc. */
		crtc_state->mode_changed = true;
		ret = drm_atomic_add_affected_connectors(&state->base,
							 conn_state->crtc);
		if (ret)
			break;
	}
	/* Errors break out of the loop; the iter must still be ended. */
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
   14544 
   14545 static int
   14546 intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
   14547 {
   14548 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   14549 	struct drm_connector *connector;
   14550 	struct drm_connector_state *old_conn_state, *new_conn_state;
   14551 	int i, ret;
   14552 
   14553 	if (INTEL_GEN(dev_priv) < 11)
   14554 		return 0;
   14555 
   14556 	/* Is tiled, mark all other tiled CRTCs as needing a modeset */
   14557 	for_each_oldnew_connector_in_state(&state->base, connector,
   14558 					   old_conn_state, new_conn_state, i) {
   14559 		if (!connector->has_tile)
   14560 			continue;
   14561 		if (!intel_connector_needs_modeset(state, connector))
   14562 			continue;
   14563 
   14564 		ret = intel_modeset_all_tiles(state, connector->tile_group->id);
   14565 		if (ret)
   14566 			return ret;
   14567 	}
   14568 
   14569 	return 0;
   14570 }
   14571 
   14572 /**
   14573  * intel_atomic_check - validate state object
   14574  * @dev: drm device
   14575  * @_state: state to validate
   14576  */
   14577 static int intel_atomic_check(struct drm_device *dev,
   14578 			      struct drm_atomic_state *_state)
   14579 {
   14580 	struct drm_i915_private *dev_priv = to_i915(dev);
   14581 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
   14582 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   14583 	struct intel_crtc *crtc;
   14584 	int ret, i;
   14585 	bool any_ms = false;
   14586 
   14587 	/* Catch I915_MODE_FLAG_INHERITED */
   14588 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14589 					    new_crtc_state, i) {
   14590 		if (new_crtc_state->hw.mode.private_flags !=
   14591 		    old_crtc_state->hw.mode.private_flags)
   14592 			new_crtc_state->uapi.mode_changed = true;
   14593 	}
   14594 
   14595 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
   14596 	if (ret)
   14597 		goto fail;
   14598 
   14599 	/**
   14600 	 * This check adds all the connectors in current state that belong to
   14601 	 * the same tile group to a full modeset.
   14602 	 * This function directly sets the mode_changed to true and we also call
   14603 	 * drm_atomic_add_affected_connectors(). Hence we are not explicitly
   14604 	 * calling drm_atomic_helper_check_modeset() after this.
   14605 	 *
   14606 	 * Fixme: Handle some corner cases where one of the
   14607 	 * tiled connectors gets disconnected and tile info is lost but since it
   14608 	 * was previously synced to other conn, we need to add that to the modeset.
   14609 	 */
   14610 	ret = intel_atomic_check_tiled_conns(state);
   14611 	if (ret)
   14612 		goto fail;
   14613 
   14614 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14615 					    new_crtc_state, i) {
   14616 		if (!needs_modeset(new_crtc_state)) {
   14617 			/* Light copy */
   14618 			intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
   14619 
   14620 			continue;
   14621 		}
   14622 
   14623 		if (!new_crtc_state->uapi.enable) {
   14624 			intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
   14625 			continue;
   14626 		}
   14627 
   14628 		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
   14629 		if (ret)
   14630 			goto fail;
   14631 
   14632 		ret = intel_modeset_pipe_config(new_crtc_state);
   14633 		if (ret)
   14634 			goto fail;
   14635 
   14636 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
   14637 	}
   14638 
   14639 	/**
   14640 	 * Check if fastset is allowed by external dependencies like other
   14641 	 * pipes and transcoders.
   14642 	 *
   14643 	 * Right now it only forces a fullmodeset when the MST master
   14644 	 * transcoder did not changed but the pipe of the master transcoder
   14645 	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
   14646 	 * in case of port synced crtcs, if one of the synced crtcs
   14647 	 * needs a full modeset, all other synced crtcs should be
   14648 	 * forced a full modeset.
   14649 	 */
   14650 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   14651 		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
   14652 			continue;
   14653 
   14654 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
   14655 			enum transcoder master = new_crtc_state->mst_master_transcoder;
   14656 
   14657 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
   14658 				new_crtc_state->uapi.mode_changed = true;
   14659 				new_crtc_state->update_pipe = false;
   14660 			}
   14661 		}
   14662 
   14663 		if (is_trans_port_sync_mode(new_crtc_state)) {
   14664 			u8 trans = new_crtc_state->sync_mode_slaves_mask |
   14665 				   BIT(new_crtc_state->master_transcoder);
   14666 
   14667 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
   14668 				new_crtc_state->uapi.mode_changed = true;
   14669 				new_crtc_state->update_pipe = false;
   14670 			}
   14671 		}
   14672 	}
   14673 
   14674 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14675 					    new_crtc_state, i) {
   14676 		if (needs_modeset(new_crtc_state)) {
   14677 			any_ms = true;
   14678 			continue;
   14679 		}
   14680 
   14681 		if (!new_crtc_state->update_pipe)
   14682 			continue;
   14683 
   14684 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
   14685 	}
   14686 
   14687 	if (any_ms && !check_digital_port_conflicts(state)) {
   14688 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
   14689 		ret = EINVAL;
   14690 		goto fail;
   14691 	}
   14692 
   14693 	ret = drm_dp_mst_atomic_check(&state->base);
   14694 	if (ret)
   14695 		goto fail;
   14696 
   14697 	any_ms |= state->cdclk.force_min_cdclk_changed;
   14698 
   14699 	ret = intel_atomic_check_planes(state, &any_ms);
   14700 	if (ret)
   14701 		goto fail;
   14702 
   14703 	if (any_ms) {
   14704 		ret = intel_modeset_checks(state);
   14705 		if (ret)
   14706 			goto fail;
   14707 	} else {
   14708 		state->cdclk.logical = dev_priv->cdclk.logical;
   14709 	}
   14710 
   14711 	ret = intel_atomic_check_crtcs(state);
   14712 	if (ret)
   14713 		goto fail;
   14714 
   14715 	intel_fbc_choose_crtc(dev_priv, state);
   14716 	ret = calc_watermark_data(state);
   14717 	if (ret)
   14718 		goto fail;
   14719 
   14720 	ret = intel_bw_atomic_check(state);
   14721 	if (ret)
   14722 		goto fail;
   14723 
   14724 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14725 					    new_crtc_state, i) {
   14726 		if (!needs_modeset(new_crtc_state) &&
   14727 		    !new_crtc_state->update_pipe)
   14728 			continue;
   14729 
   14730 		intel_dump_pipe_config(new_crtc_state, state,
   14731 				       needs_modeset(new_crtc_state) ?
   14732 				       "[modeset]" : "[fastset]");
   14733 	}
   14734 
   14735 	return 0;
   14736 
   14737  fail:
   14738 	if (ret == -EDEADLK)
   14739 		return ret;
   14740 
   14741 	/*
   14742 	 * FIXME would probably be nice to know which crtc specifically
   14743 	 * caused the failure, in cases where we can pinpoint it.
   14744 	 */
   14745 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   14746 					    new_crtc_state, i)
   14747 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
   14748 
   14749 	return ret;
   14750 }
   14751 
   14752 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
   14753 {
   14754 	return drm_atomic_helper_prepare_planes(state->base.dev,
   14755 						&state->base);
   14756 }
   14757 
   14758 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
   14759 {
   14760 	struct drm_device *dev = crtc->base.dev;
   14761 	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
   14762 
   14763 	if (!vblank->max_vblank_count)
   14764 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
   14765 
   14766 	return crtc->base.funcs->get_vblank_counter(&crtc->base);
   14767 }
   14768 
   14769 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
   14770 				  struct intel_crtc_state *crtc_state)
   14771 {
   14772 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   14773 
   14774 	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
   14775 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
   14776 
   14777 	if (crtc_state->has_pch_encoder) {
   14778 		enum pipe pch_transcoder =
   14779 			intel_crtc_pch_transcoder(crtc);
   14780 
   14781 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
   14782 	}
   14783 }
   14784 
/*
 * Fastset path: reprogram the pipe-level state that can change without
 * a full modeset -- pipe source size and panel fitter -- plus the
 * gen11+ pipe chicken register.  Called from commit_pipe_config() when
 * update_pipe is set.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/*
		 * ilk-style PCH panel fitter: enable for the new state,
		 * or disable if it was on before and is now off.
		 */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
   14817 
/*
 * Program per-pipe configuration inside the vblank-evasion critical
 * section.  On fastsets this covers color management, scaler detach,
 * PIPEMISC and the pipe/pfit fastset; during full modesets the pipe
 * was already programmed when the CRTC was enabled, so only the
 * watermark update runs here.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		/* gen9+: release any scalers no longer in use */
		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
   14848 
/*
 * Enable (modeset) or update (fastset) a single CRTC: timings/LUTs and
 * encoder updates, FBC state, then the vblank-evaded pipe config
 * commit and plane programming.  On the first fastset of a
 * BIOS-inherited configuration, FIFO underrun reporting is armed at
 * the end.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		intel_crtc_update_active_timings(new_crtc_state);

		dev_priv->display.crtc_enable(state, crtc);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		/* Fastset: preload the LUTs ahead of the update if asked. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	/* Keep FBC in sync with the new state. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, old_crtc_state, new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
   14906 
   14907 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
   14908 {
   14909 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
   14910 	enum transcoder slave_transcoder;
   14911 
   14912 	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
   14913 
   14914 	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
   14915 	return intel_get_crtc_for_pipe(dev_priv,
   14916 				       (enum pipe)slave_transcoder);
   14917 }
   14918 
/*
 * Fully disable a CRTC that is being turned off or re-modeset: planes
 * first, then pipe CRC (before the pipe, to avoid racing vblank off),
 * the pipe itself, FBC and its shared DPLL.  If the pipe stays off,
 * initial watermarks are programmed on non-GMCH platforms.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
   14945 
/*
 * Disable every active CRTC that requires a full modeset.  Port-sync
 * and MST slave CRTCs must go down before their masters (slave vblanks
 * are masked until the master's), so they are handled in a first pass;
 * a @handled pipe mask prevents the second pass from disabling them
 * twice.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;	/* mask of pipes disabled in the first pass */
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
   14990 
   14991 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
   14992 {
   14993 	struct intel_crtc *crtc;
   14994 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   14995 	int i;
   14996 
   14997 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
   14998 		if (!new_crtc_state->hw.active)
   14999 			continue;
   15000 
   15001 		intel_update_crtc(crtc, state, old_crtc_state,
   15002 				  new_crtc_state);
   15003 	}
   15004 }
   15005 
   15006 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
   15007 					      struct intel_atomic_state *state,
   15008 					      struct intel_crtc_state *new_crtc_state)
   15009 {
   15010 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   15011 
   15012 	intel_crtc_update_active_timings(new_crtc_state);
   15013 	dev_priv->display.crtc_enable(state, crtc);
   15014 	intel_crtc_enable_pipe_crc(crtc);
   15015 }
   15016 
/*
 * Stop link training on the DP encoder driving @crtc, which moves its
 * DP_TP_CTL out of the Idle pattern.  Finds the connector in @state
 * whose new state targets @crtc.
 *
 * NOTE(review): assumes a matching connector is always present in
 * @state; if none matches, @conn is left at the last iterated
 * connector (or uninitialized for an empty state) -- confirm callers
 * guarantee a match.
 */
static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
				       struct intel_atomic_state *state)
{
	struct drm_connector *uninitialized_var(conn);
	struct drm_connector_state *conn_state;
	struct intel_dp *intel_dp;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc == &crtc->base)
			break;
	}
	intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
	intel_dp_stop_link_train(intel_dp);
}
   15032 
   15033 /*
   15034  * TODO: This is only called from port sync and it is identical to what will be
   15035  * executed again in intel_update_crtc() over port sync pipes
   15036  */
   15037 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
   15038 					   struct intel_atomic_state *state)
   15039 {
   15040 	struct intel_crtc_state *new_crtc_state =
   15041 		intel_atomic_get_new_crtc_state(state, crtc);
   15042 	struct intel_crtc_state *old_crtc_state =
   15043 		intel_atomic_get_old_crtc_state(state, crtc);
   15044 	struct intel_plane_state *new_plane_state =
   15045 		intel_atomic_get_new_plane_state(state,
   15046 						 to_intel_plane(crtc->base.primary));
   15047 	bool modeset = needs_modeset(new_crtc_state);
   15048 
   15049 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
   15050 		intel_fbc_disable(crtc);
   15051 	else if (new_plane_state)
   15052 		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
   15053 
   15054 	/* Perform vblank evasion around commit operation */
   15055 	intel_pipe_update_start(new_crtc_state);
   15056 	commit_pipe_config(state, old_crtc_state, new_crtc_state);
   15057 	skl_update_planes_on_crtc(state, crtc);
   15058 	intel_pipe_update_end(new_crtc_state);
   15059 
   15060 	/*
   15061 	 * We usually enable FIFO underrun interrupts as part of the
   15062 	 * CRTC enable sequence during modesets.  But when we inherit a
   15063 	 * valid pipe configuration from the BIOS we need to take care
   15064 	 * of enabling them on the CRTC's first fastset.
   15065 	 */
   15066 	if (new_crtc_state->update_pipe && !modeset &&
   15067 	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
   15068 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
   15069 }
   15070 
/*
 * Enable a transcoder port-sync master/slave pair in the required
 * hardware order: both pipes come up with DP_TP_CTL left in Idle, the
 * slave's DP_TP_CTL is then set to Normal followed (after a short
 * delay) by the master's, and finally the deferred plane/pipe updates
 * run for both CRTCs.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
					       struct intel_atomic_state *state,
					       struct intel_crtc_state *old_crtc_state,
					       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
		      slave_crtc->base.name);

	/* Enable seq for slave with DP_TP_CTL left Idle until the
	 * master is ready
	 */
	intel_crtc_enable_trans_port_sync(slave_crtc,
					  state,
					  new_slave_crtc_state);

	/* Enable seq for master with DP_TP_CTL left Idle */
	intel_crtc_enable_trans_port_sync(crtc,
					  state,
					  new_crtc_state);

	/* Set Slave's DP_TP_CTL to Normal */
	intel_set_dp_tp_ctl_normal(slave_crtc,
				   state);

	/* Set Master's DP_TP_CTL To Normal */
	usleep_range(200, 400);
	intel_set_dp_tp_ctl_normal(crtc,
				   state);

	/* Now do the post crtc enable for all master and slaves */
	intel_post_crtc_enable_updates(slave_crtc,
				       state);
	intel_post_crtc_enable_updates(crtc,
				       state);
}
   15116 
/*
 * skl+ modeset enable path.  DDB (data buffer) allocations of active
 * pipes must never overlap between two CRTC updates, so pipes are
 * updated in dependency order: first fastset pipes whose new DDB does
 * not overlap any other pipe's current allocation (waiting a vblank
 * when their DDB changed and more updates remain), then independent
 * modeset pipes (port-sync pairs enabled together), and finally the
 * remaining dependent pipes (MST slaves).  The second DBuf slice is
 * enabled up front / disabled at the end as required.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = state->wm_results.ddb.enabled_slices;
	/* entries[i] tracks the DDB allocation currently in effect per state index */
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!needs_modeset(new_crtc_state)) {
			entries[i] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(crtc->pipe);
		} else {
			modeset_pipes |= BIT(crtc->pipe);
		}
	}

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Postpone until no other pipe still holds an overlapping DDB. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, num_pipes, i))
				continue;

			entries[i] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Dependent pipes (MST / port-sync slaves) wait for the last pass. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_slave(new_crtc_state))
			continue;

		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
						    entries, num_pipes, i));

		entries[i] = new_crtc_state->wm.skl.ddb;
		modeset_pipes &= ~BIT(pipe);

		if (is_trans_port_sync_mode(new_crtc_state)) {
			struct intel_crtc *slave_crtc;

			/* Master and slave are brought up together. */
			intel_update_trans_port_sync_crtcs(crtc, state,
							   old_crtc_state,
							   new_crtc_state);

			slave_crtc = intel_get_slave_crtc(new_crtc_state);
			/* TODO: update entries[] of slave */
			modeset_pipes &= ~BIT(slave_crtc->pipe);

		} else {
			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);
		}
	}

	/*
	 * Finally enable all pipes that needs a modeset and depends on
	 * other pipes, right now it is only MST slaves as both port sync slave
	 * and master are enabled together
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
						    entries, num_pipes, i));

		entries[i] = new_crtc_state->wm.skl.ddb;
		modeset_pipes &= ~BIT(pipe);

		intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	WARN_ON(modeset_pipes);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
   15251 
   15252 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
   15253 {
   15254 	struct intel_atomic_state *state, *next;
   15255 	struct llist_node *freed;
   15256 
   15257 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
   15258 	llist_for_each_entry_safe(state, next, freed, freed)
   15259 		drm_atomic_state_put(&state->base);
   15260 }
   15261 
   15262 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
   15263 {
   15264 	struct drm_i915_private *dev_priv =
   15265 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
   15266 
   15267 	intel_atomic_helper_free_state(dev_priv);
   15268 }
   15269 
/*
 * Sleep until either the commit's i915_sw_fence has signalled or a GPU
 * reset requiring modeset recovery (I915_RESET_MODESET) has been
 * flagged.  Waits on both wait queues simultaneously so that a reset
 * can interrupt the fence wait.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues before re-checking the conditions. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
   15296 
   15297 static void intel_atomic_cleanup_work(struct work_struct *work)
   15298 {
   15299 	struct drm_atomic_state *state =
   15300 		container_of(work, struct drm_atomic_state, commit_work);
   15301 	struct drm_i915_private *i915 = to_i915(state->dev);
   15302 
   15303 	drm_atomic_helper_cleanup_planes(&i915->drm, state);
   15304 	drm_atomic_helper_commit_cleanup_done(state);
   15305 	drm_atomic_state_put(state);
   15306 
   15307 	intel_atomic_helper_free_state(i915);
   15308 }
   15309 
/*
 * Second half of an atomic commit, run once all fences have signalled:
 * programs the hardware for @state.  Disables outgoing CRTCs, updates
 * cdclk and SAGV around the plane updates, enables/updates the
 * remaining CRTCs, waits for the flips to complete, performs
 * post-vblank work (optimal watermarks, power-domain release,
 * verification), and finally queues the state cleanup to a worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Block until the commit fence signals or a GPU reset intervenes. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	/* Hold all display power for the duration of a modeset. */
	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/*
	 * Grab the power domains each modeset/fastset CRTC needs up front;
	 * they are released again after the updates below.
	 *
	 * NOTE(review): domains are stored by pipe here but released by
	 * state index 'i' below -- these coincide only when crtc index
	 * equals pipe; confirm that holds on all supported platforms.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		/* Raise cdclk if needed before pipes/planes come up. */
		intel_set_cdclk_pre_plane_update(dev_priv,
						 &state->cdclk.actual,
						 &dev_priv->cdclk.actual,
						 state->cdclk.pipe);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		/* Lower cdclk if possible now that everything is enabled. */
		intel_set_cdclk_post_plane_update(dev_priv,
						  &state->cdclk.actual,
						  &dev_priv->cdclk.actual,
						  state->cdclk.pipe);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* Load LUTs that were deliberately not preloaded before the update. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	/* Re-enable SAGV if the new configuration allows it. */
	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
   15479 
   15480 static void intel_atomic_commit_work(struct work_struct *work)
   15481 {
   15482 	struct intel_atomic_state *state =
   15483 		container_of(work, struct intel_atomic_state, base.commit_work);
   15484 
   15485 	intel_atomic_commit_tail(state);
   15486 }
   15487 
   15488 static int __i915_sw_fence_call
   15489 intel_atomic_commit_ready(struct i915_sw_fence *fence,
   15490 			  enum i915_sw_fence_notify notify)
   15491 {
   15492 	struct intel_atomic_state *state =
   15493 		container_of(fence, struct intel_atomic_state, commit_ready);
   15494 
   15495 	switch (notify) {
   15496 	case FENCE_COMPLETE:
   15497 		/* we do blocking waits in the worker, nothing to do here */
   15498 		break;
   15499 	case FENCE_FREE:
   15500 		{
   15501 			struct intel_atomic_helper *helper =
   15502 				&to_i915(state->base.dev)->atomic_helper;
   15503 
   15504 			if (llist_add(&state->freed, &helper->free_list))
   15505 				schedule_work(&helper->free_work);
   15506 			break;
   15507 		}
   15508 	}
   15509 
   15510 	return NOTIFY_DONE;
   15511 }
   15512 
   15513 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
   15514 {
   15515 	struct intel_plane_state *old_plane_state, *new_plane_state;
   15516 	struct intel_plane *plane;
   15517 	int i;
   15518 
   15519 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
   15520 					     new_plane_state, i)
   15521 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
   15522 					to_intel_frontbuffer(new_plane_state->hw.fb),
   15523 					plane->frontbuffer_bit);
   15524 }
   15525 
   15526 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
   15527 {
   15528 	struct intel_crtc *crtc;
   15529 
   15530 	for_each_intel_crtc(&dev_priv->drm, crtc)
   15531 		drm_modeset_lock_assert_held(&crtc->base.mutex);
   15532 }
   15533 
/*
 * Commit a validated atomic state to the hardware.
 *
 * @dev: drm device
 * @_state: atomic state to commit
 * @nonblock: when true, complete the commit asynchronously on a workqueue
 *
 * Returns 0 on success, or a negative error code from the prepare/setup
 * phase; on failure all references acquired here (sw fence, runtime PM
 * wakeref) are dropped again before returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Released by intel_atomic_commit_tail() once the commit finishes. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);

	if (ret) {
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	/* Point of no return: the new state has been swapped in. */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		/* Publish the new device-global limits. */
		memcpy(dev_priv->min_cdclk, state->min_cdclk,
		       sizeof(state->min_cdclk));
		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
		       sizeof(state->min_voltage_level));
		dev_priv->active_pipes = state->active_pipes;
		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

		intel_cdclk_swap_state(state);
	}

	/* This reference is consumed by the commit tail / commit worker. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/*
		 * A blocking commit runs the tail inline, but must not
		 * overtake a nonblocking modeset still on the queue.
		 */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
   15628 
/*
 * Wait-queue entry armed on a crtc's vblank waitqueue to boost GPU
 * frequency (RPS) when a flip's render request has not started by the
 * next vblank. See add_rps_boost_after_vblank() and do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;		/* holds a vblank reference */
	struct i915_request *request;	/* holds a request reference */
};
   15635 
   15636 static int do_rps_boost(struct wait_queue_entry *_wait,
   15637 			unsigned mode, int sync, void *key)
   15638 {
   15639 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
   15640 	struct i915_request *rq = wait->request;
   15641 
   15642 	/*
   15643 	 * If we missed the vblank, but the request is already running it
   15644 	 * is reasonable to assume that it will complete before the next
   15645 	 * vblank without our intervention, so leave RPS alone.
   15646 	 */
   15647 	if (!i915_request_started(rq))
   15648 		intel_rps_boost(rq);
   15649 	i915_request_put(rq);
   15650 
   15651 	drm_crtc_vblank_put(wait->crtc);
   15652 
   15653 	list_del(&wait->wait.entry);
   15654 	kfree(wait);
   15655 	return 1;
   15656 }
   15657 
   15658 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
   15659 				       struct dma_fence *fence)
   15660 {
   15661 	struct wait_rps_boost *wait;
   15662 
   15663 	if (!dma_fence_is_i915(fence))
   15664 		return;
   15665 
   15666 	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
   15667 		return;
   15668 
   15669 	if (drm_crtc_vblank_get(crtc))
   15670 		return;
   15671 
   15672 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
   15673 	if (!wait) {
   15674 		drm_crtc_vblank_put(crtc);
   15675 		return;
   15676 	}
   15677 
   15678 	wait->request = to_request(dma_fence_get(fence));
   15679 	wait->crtc = crtc;
   15680 
   15681 	wait->wait.func = do_rps_boost;
   15682 	wait->wait.flags = 0;
   15683 
   15684 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
   15685 }
   15686 
   15687 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
   15688 {
   15689 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   15690 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   15691 	struct drm_framebuffer *fb = plane_state->hw.fb;
   15692 	struct i915_vma *vma;
   15693 
   15694 	if (plane->id == PLANE_CURSOR &&
   15695 	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
   15696 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
   15697 		const int align = intel_cursor_alignment(dev_priv);
   15698 		int err;
   15699 
   15700 		err = i915_gem_object_attach_phys(obj, align);
   15701 		if (err)
   15702 			return err;
   15703 	}
   15704 
   15705 	vma = intel_pin_and_fence_fb_obj(fb,
   15706 					 &plane_state->view,
   15707 					 intel_plane_uses_fence(plane_state),
   15708 					 &plane_state->flags);
   15709 	if (IS_ERR(vma))
   15710 		return PTR_ERR(vma);
   15711 
   15712 	plane_state->vma = vma;
   15713 
   15714 	return 0;
   15715 }
   15716 
   15717 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
   15718 {
   15719 	struct i915_vma *vma;
   15720 
   15721 	vma = fetch_and_zero(&old_plane_state->vma);
   15722 	if (vma)
   15723 		intel_unpin_fb_vma(vma, old_plane_state->flags);
   15724 }
   15725 
/*
 * Raise the scheduling priority of rendering still pending on the
 * framebuffer object so it completes in time for the display update.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
   15734 
   15735 /**
   15736  * intel_prepare_plane_fb - Prepare fb for usage on plane
   15737  * @plane: drm plane to prepare for
   15738  * @_new_plane_state: the plane state being prepared
   15739  *
   15740  * Prepares a framebuffer for usage on a display plane.  Generally this
   15741  * involves pinning the underlying object and updating the frontbuffer tracking
   15742  * bits.  Some older platforms need special physical address handling for
   15743  * cursor planes.
   15744  *
   15745  * Returns 0 on success, negative error code on failure.
   15746  */
   15747 int
   15748 intel_prepare_plane_fb(struct drm_plane *plane,
   15749 		       struct drm_plane_state *_new_plane_state)
   15750 {
   15751 	struct intel_plane_state *new_plane_state =
   15752 		to_intel_plane_state(_new_plane_state);
   15753 	struct intel_atomic_state *intel_state =
   15754 		to_intel_atomic_state(new_plane_state->uapi.state);
   15755 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
   15756 	struct drm_framebuffer *fb = new_plane_state->hw.fb;
   15757 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
   15758 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
   15759 	int ret;
   15760 
   15761 	if (old_obj) {
   15762 		struct intel_crtc_state *crtc_state =
   15763 			intel_atomic_get_new_crtc_state(intel_state,
   15764 							to_intel_crtc(plane->state->crtc));
   15765 
   15766 		/* Big Hammer, we also need to ensure that any pending
   15767 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
   15768 		 * current scanout is retired before unpinning the old
   15769 		 * framebuffer. Note that we rely on userspace rendering
   15770 		 * into the buffer attached to the pipe they are waiting
   15771 		 * on. If not, userspace generates a GPU hang with IPEHR
   15772 		 * point to the MI_WAIT_FOR_EVENT.
   15773 		 *
   15774 		 * This should only fail upon a hung GPU, in which case we
   15775 		 * can safely continue.
   15776 		 */
   15777 		if (needs_modeset(crtc_state)) {
   15778 			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
   15779 							      old_obj->base.resv, NULL,
   15780 							      false, 0,
   15781 							      GFP_KERNEL);
   15782 			if (ret < 0)
   15783 				return ret;
   15784 		}
   15785 	}
   15786 
   15787 	if (new_plane_state->uapi.fence) { /* explicit fencing */
   15788 		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
   15789 						    new_plane_state->uapi.fence,
   15790 						    I915_FENCE_TIMEOUT,
   15791 						    GFP_KERNEL);
   15792 		if (ret < 0)
   15793 			return ret;
   15794 	}
   15795 
   15796 	if (!obj)
   15797 		return 0;
   15798 
   15799 	ret = i915_gem_object_pin_pages(obj);
   15800 	if (ret)
   15801 		return ret;
   15802 
   15803 	ret = intel_plane_pin_fb(new_plane_state);
   15804 
   15805 	i915_gem_object_unpin_pages(obj);
   15806 	if (ret)
   15807 		return ret;
   15808 
   15809 	fb_obj_bump_render_priority(obj);
   15810 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
   15811 
   15812 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
   15813 		struct dma_fence *fence;
   15814 
   15815 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
   15816 						      obj->base.resv, NULL,
   15817 						      false, I915_FENCE_TIMEOUT,
   15818 						      GFP_KERNEL);
   15819 		if (ret < 0)
   15820 			return ret;
   15821 
   15822 		fence = dma_resv_get_excl_rcu(obj->base.resv);
   15823 		if (fence) {
   15824 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
   15825 						   fence);
   15826 			dma_fence_put(fence);
   15827 		}
   15828 	} else {
   15829 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
   15830 					   new_plane_state->uapi.fence);
   15831 	}
   15832 
   15833 	/*
   15834 	 * We declare pageflips to be interactive and so merit a small bias
   15835 	 * towards upclocking to deliver the frame on time. By only changing
   15836 	 * the RPS thresholds to sample more regularly and aim for higher
   15837 	 * clocks we can hopefully deliver low power workloads (like kodi)
   15838 	 * that are not quite steady state without resorting to forcing
   15839 	 * maximum clocks following a vblank miss (see do_rps_boost()).
   15840 	 */
   15841 	if (!intel_state->rps_interactive) {
   15842 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
   15843 		intel_state->rps_interactive = true;
   15844 	}
   15845 
   15846 	return 0;
   15847 }
   15848 
   15849 /**
   15850  * intel_cleanup_plane_fb - Cleans up an fb after plane use
   15851  * @plane: drm plane to clean up for
   15852  * @_old_plane_state: the state from the previous modeset
   15853  *
   15854  * Cleans up a framebuffer that has just been removed from a plane.
   15855  */
   15856 void
   15857 intel_cleanup_plane_fb(struct drm_plane *plane,
   15858 		       struct drm_plane_state *_old_plane_state)
   15859 {
   15860 	struct intel_plane_state *old_plane_state =
   15861 		to_intel_plane_state(_old_plane_state);
   15862 	struct intel_atomic_state *intel_state =
   15863 		to_intel_atomic_state(old_plane_state->uapi.state);
   15864 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
   15865 
   15866 	if (intel_state->rps_interactive) {
   15867 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
   15868 		intel_state->rps_interactive = false;
   15869 	}
   15870 
   15871 	/* Should only be called after a successful intel_prepare_plane_fb()! */
   15872 	intel_plane_unpin_fb(old_plane_state);
   15873 }
   15874 
   15875 /**
   15876  * intel_plane_destroy - destroy a plane
   15877  * @plane: plane to destroy
   15878  *
   15879  * Common destruction function for all types of planes (primary, cursor,
   15880  * sprite).
   15881  */
   15882 void intel_plane_destroy(struct drm_plane *plane)
   15883 {
   15884 	drm_plane_cleanup(plane);
   15885 	kfree(to_intel_plane(plane));
   15886 }
   15887 
   15888 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
   15889 					    u32 format, u64 modifier)
   15890 {
   15891 	switch (modifier) {
   15892 	case DRM_FORMAT_MOD_LINEAR:
   15893 	case I915_FORMAT_MOD_X_TILED:
   15894 		break;
   15895 	default:
   15896 		return false;
   15897 	}
   15898 
   15899 	switch (format) {
   15900 	case DRM_FORMAT_C8:
   15901 	case DRM_FORMAT_RGB565:
   15902 	case DRM_FORMAT_XRGB1555:
   15903 	case DRM_FORMAT_XRGB8888:
   15904 		return modifier == DRM_FORMAT_MOD_LINEAR ||
   15905 			modifier == I915_FORMAT_MOD_X_TILED;
   15906 	default:
   15907 		return false;
   15908 	}
   15909 }
   15910 
   15911 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
   15912 					    u32 format, u64 modifier)
   15913 {
   15914 	switch (modifier) {
   15915 	case DRM_FORMAT_MOD_LINEAR:
   15916 	case I915_FORMAT_MOD_X_TILED:
   15917 		break;
   15918 	default:
   15919 		return false;
   15920 	}
   15921 
   15922 	switch (format) {
   15923 	case DRM_FORMAT_C8:
   15924 	case DRM_FORMAT_RGB565:
   15925 	case DRM_FORMAT_XRGB8888:
   15926 	case DRM_FORMAT_XBGR8888:
   15927 	case DRM_FORMAT_ARGB8888:
   15928 	case DRM_FORMAT_ABGR8888:
   15929 	case DRM_FORMAT_XRGB2101010:
   15930 	case DRM_FORMAT_XBGR2101010:
   15931 	case DRM_FORMAT_ARGB2101010:
   15932 	case DRM_FORMAT_ABGR2101010:
   15933 	case DRM_FORMAT_XBGR16161616F:
   15934 		return modifier == DRM_FORMAT_MOD_LINEAR ||
   15935 			modifier == I915_FORMAT_MOD_X_TILED;
   15936 	default:
   15937 		return false;
   15938 	}
   15939 }
   15940 
   15941 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
   15942 					      u32 format, u64 modifier)
   15943 {
   15944 	return modifier == DRM_FORMAT_MOD_LINEAR &&
   15945 		format == DRM_FORMAT_ARGB8888;
   15946 }
   15947 
/* drm_plane_funcs for gen4+ (pre-skl) primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
   15956 
/* drm_plane_funcs for gen2/3 primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
   15965 
/*
 * Legacy cursor fastpath: update the cursor plane without going through
 * a full atomic commit, avoiding vblank waits for simple cursor
 * movement and flips. Anything that could affect watermarks or race
 * with a pending commit is punted to the drm_atomic_helper slowpath.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On failure free the new state; on success free the replaced one. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
   16088 
/*
 * drm_plane_funcs for cursor planes; .update_plane is the legacy cursor
 * fastpath rather than the generic atomic helper.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
   16097 
   16098 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
   16099 			       enum i9xx_plane_id i9xx_plane)
   16100 {
   16101 	if (!HAS_FBC(dev_priv))
   16102 		return false;
   16103 
   16104 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
   16105 		return i9xx_plane == PLANE_A; /* tied to pipe A */
   16106 	else if (IS_IVYBRIDGE(dev_priv))
   16107 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
   16108 			i9xx_plane == PLANE_C;
   16109 	else if (INTEL_GEN(dev_priv) >= 4)
   16110 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
   16111 	else
   16112 		return i9xx_plane == PLANE_A;
   16113 }
   16114 
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * per-platform format list, vfuncs and properties. Gen9+ is delegated
 * to skl_universal_plane_create(). Returns the plane or an ERR_PTR.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the platform-appropriate pixel format list. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	/* Platform-specific minimum-cdclk calculation. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	possible_crtcs = BIT(pipe);

	/* ilk+/g4x primary planes are fixed to one pipe; name accordingly. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary planes sit at the bottom of the z-order. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
   16250 
/*
 * Allocate and register the cursor plane for @pipe, wiring up the
 * i845/i865 or i9xx cursor vfuncs as appropriate. Returns the plane or
 * an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* Invalidate the cached register values so the first update writes. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor goes on top of all sprites. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
   16318 
/*
 * Common drm_crtc_funcs entries shared by every per-platform CRTC
 * function table below; only the vblank hooks differ per platform.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
   16329 
/* CRTC funcs selected in intel_crtc_init() for non-GMCH gen8+ platforms. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};
   16337 
/* CRTC funcs selected in intel_crtc_init() for non-GMCH pre-gen8 platforms. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};
   16345 
/* CRTC funcs selected in intel_crtc_init() for CHV/VLV/G4X. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
   16353 
/* CRTC funcs selected in intel_crtc_init() for other gen4 GMCH platforms. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
   16361 
/* CRTC funcs selected in intel_crtc_init() for i945GM/i915GM. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};
   16369 
/* CRTC funcs selected in intel_crtc_init() for the remaining gen3 platforms. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
   16377 
/* CRTC funcs selected in intel_crtc_init() for the oldest GMCH platforms. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
   16385 
   16386 static struct intel_crtc *intel_crtc_alloc(void)
   16387 {
   16388 	struct intel_crtc_state *crtc_state;
   16389 	struct intel_crtc *crtc;
   16390 
   16391 	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
   16392 	if (!crtc)
   16393 		return ERR_PTR(-ENOMEM);
   16394 
   16395 	crtc_state = intel_crtc_state_alloc(crtc);
   16396 	if (!crtc_state) {
   16397 		kfree(crtc);
   16398 		return ERR_PTR(-ENOMEM);
   16399 	}
   16400 
   16401 	crtc->base.state = &crtc_state->uapi;
   16402 	crtc->config = crtc_state;
   16403 
   16404 	return crtc;
   16405 }
   16406 
/* Undo intel_crtc_alloc(): release the attached state, then the crtc itself. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
   16412 
/*
 * Create and register the CRTC for @pipe together with all of its
 * planes (primary, sprites, cursor), pick the per-platform CRTC
 * function table, and record the pipe->crtc (and, pre-gen9, the
 * plane->crtc) mappings.  Returns 0 or a negative error code; on
 * failure everything allocated here is freed again.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	/* One sprite plane per hardware sprite on this pipe. */
	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Vblank hooks differ per platform; see the tables above. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		/* Likewise for the legacy plane id of the primary. */
		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
   16500 
   16501 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
   16502 				      struct drm_file *file)
   16503 {
   16504 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
   16505 	struct drm_crtc *drmmode_crtc;
   16506 	struct intel_crtc *crtc;
   16507 
   16508 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
   16509 	if (!drmmode_crtc)
   16510 		return -ENOENT;
   16511 
   16512 	crtc = to_intel_crtc(drmmode_crtc);
   16513 	pipe_from_crtc_id->pipe = crtc->pipe;
   16514 
   16515 	return 0;
   16516 }
   16517 
   16518 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
   16519 {
   16520 	struct drm_device *dev = encoder->base.dev;
   16521 	struct intel_encoder *source_encoder;
   16522 	u32 possible_clones = 0;
   16523 
   16524 	for_each_intel_encoder(dev, source_encoder) {
   16525 		if (encoders_cloneable(encoder, source_encoder))
   16526 			possible_clones |= drm_encoder_mask(&source_encoder->base);
   16527 	}
   16528 
   16529 	return possible_clones;
   16530 }
   16531 
   16532 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
   16533 {
   16534 	struct drm_device *dev = encoder->base.dev;
   16535 	struct intel_crtc *crtc;
   16536 	u32 possible_crtcs = 0;
   16537 
   16538 	for_each_intel_crtc(dev, crtc) {
   16539 		if (encoder->pipe_mask & BIT(crtc->pipe))
   16540 			possible_crtcs |= drm_crtc_mask(&crtc->base);
   16541 	}
   16542 
   16543 	return possible_crtcs;
   16544 }
   16545 
   16546 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
   16547 {
   16548 	if (!IS_MOBILE(dev_priv))
   16549 		return false;
   16550 
   16551 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
   16552 		return false;
   16553 
   16554 	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
   16555 		return false;
   16556 
   16557 	return true;
   16558 }
   16559 
   16560 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
   16561 {
   16562 	if (INTEL_GEN(dev_priv) >= 9)
   16563 		return false;
   16564 
   16565 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
   16566 		return false;
   16567 
   16568 	if (HAS_PCH_LPT_H(dev_priv) &&
   16569 	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
   16570 		return false;
   16571 
   16572 	/* DDI E can't be used if DDI A requires 4 lanes */
   16573 	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
   16574 		return false;
   16575 
   16576 	if (!dev_priv->vbt.int_crt_support)
   16577 		return false;
   16578 
   16579 	return true;
   16580 }
   16581 
   16582 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
   16583 {
   16584 	int pps_num;
   16585 	int pps_idx;
   16586 
   16587 	if (HAS_DDI(dev_priv))
   16588 		return;
   16589 	/*
   16590 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
   16591 	 * everywhere where registers can be write protected.
   16592 	 */
   16593 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   16594 		pps_num = 2;
   16595 	else
   16596 		pps_num = 1;
   16597 
   16598 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
   16599 		u32 val = I915_READ(PP_CONTROL(pps_idx));
   16600 
   16601 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
   16602 		I915_WRITE(PP_CONTROL(pps_idx), val);
   16603 	}
   16604 }
   16605 
   16606 static void intel_pps_init(struct drm_i915_private *dev_priv)
   16607 {
   16608 	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
   16609 		dev_priv->pps_mmio_base = PCH_PPS_BASE;
   16610 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   16611 		dev_priv->pps_mmio_base = VLV_PPS_BASE;
   16612 	else
   16613 		dev_priv->pps_mmio_base = PPS_BASE;
   16614 
   16615 	intel_pps_unlock_regs_wa(dev_priv);
   16616 }
   16617 
/*
 * Probe for and register all display outputs (encoders) present on
 * this platform, then fill in each encoder's possible_crtcs and
 * possible_clones masks.  The platform branches are ordered newest to
 * oldest; each uses some combination of hardware strap registers and
 * VBT data to decide which ports to register.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* gen12+: all outputs are DDI based, plus command-mode DSI. */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW/SKL-era DDI platforms. */
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB with the PCH display block. */
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, compute their crtc/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
   16856 
/*
 * drm_framebuffer_funcs.destroy: tear down an i915 framebuffer.
 * Drops the frontbuffer tracking reference taken in
 * intel_framebuffer_init() and frees the wrapper struct.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}
   16866 
/*
 * drm_framebuffer_funcs.create_handle: export a GEM handle for the
 * framebuffer's backing object to @file.  Refused (-EINVAL) for
 * userptr-backed objects.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	/* No handles for objects backed by userspace memory. */
	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}
   16880 
/*
 * drm_framebuffer_funcs.dirty: userspace signalled (DIRTYFB ioctl) that
 * it finished CPU rendering into the framebuffer.  Flush the object if
 * it's on a display and notify frontbuffer tracking.  The clip
 * rectangles (@clips/@num_clips), @flags and @color are ignored; the
 * whole framebuffer is flushed.  Always returns 0.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
   16894 
/* Framebuffer vfuncs installed by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
   16900 
/*
 * Validate @mode_cmd against hardware limits and initialize @intel_fb
 * around the GEM object @obj.  Takes a frontbuffer tracking reference
 * on the object, which is dropped again on any failure.  @mode_cmd may
 * be modified (a modifier is filled in for legacy addfb on X-tiled
 * objects).  Returns 0 on success or a negative error code.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling/stride under its lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* At least one plane must support this format+modifier combo. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
			      mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks: single handle, stride alignment, CCS aux pitch. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
					      i,
					      fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Drop the frontbuffer reference taken at the top. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
   17036 
   17037 static struct drm_framebuffer *
   17038 intel_user_framebuffer_create(struct drm_device *dev,
   17039 			      struct drm_file *filp,
   17040 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
   17041 {
   17042 	struct drm_framebuffer *fb;
   17043 	struct drm_i915_gem_object *obj;
   17044 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
   17045 
   17046 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
   17047 	if (!obj)
   17048 		return ERR_PTR(-ENOENT);
   17049 
   17050 	fb = intel_framebuffer_create(obj, &mode_cmd);
   17051 	i915_gem_object_put(obj);
   17052 
   17053 	return fb;
   17054 }
   17055 
/*
 * drm_mode_config_funcs.atomic_state_free: release an intel atomic
 * state, finalizing its commit_ready fence before freeing the memory.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
   17066 
/*
 * drm_mode_config_funcs.mode_valid: device-wide mode validation.
 * Rejects mode flags the hardware can't generate and timings beyond
 * the per-generation transcoder limits; per-connector restrictions are
 * handled in the connectors' own mode_valid hooks.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size and blanking requirements. */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
   17157 
   17158 enum drm_mode_status
   17159 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
   17160 				const struct drm_display_mode *mode)
   17161 {
   17162 	int plane_width_max, plane_height_max;
   17163 
   17164 	/*
   17165 	 * intel_mode_valid() should be
   17166 	 * sufficient on older platforms.
   17167 	 */
   17168 	if (INTEL_GEN(dev_priv) < 9)
   17169 		return MODE_OK;
   17170 
   17171 	/*
   17172 	 * Most people will probably want a fullscreen
   17173 	 * plane so let's not advertize modes that are
   17174 	 * too big for that.
   17175 	 */
   17176 	if (INTEL_GEN(dev_priv) >= 11) {
   17177 		plane_width_max = 5120;
   17178 		plane_height_max = 4320;
   17179 	} else {
   17180 		plane_width_max = 5120;
   17181 		plane_height_max = 4096;
   17182 	}
   17183 
   17184 	if (mode->hdisplay > plane_width_max)
   17185 		return MODE_H_ILLEGAL;
   17186 
   17187 	if (mode->vdisplay > plane_height_max)
   17188 		return MODE_V_ILLEGAL;
   17189 
   17190 	return MODE_OK;
   17191 }
   17192 
/*
 * drm_mode_config_funcs vtable: routes the DRM core's mode-setting
 * entry points (fb creation, format lookup, fbdev poll notification,
 * mode validation, and the atomic check/commit plus atomic state
 * lifecycle hooks) to the i915 implementations.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
   17204 
   17205 /**
   17206  * intel_init_display_hooks - initialize the display modesetting hooks
   17207  * @dev_priv: device private
   17208  */
   17209 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
   17210 {
   17211 	intel_init_cdclk_hooks(dev_priv);
   17212 
   17213 	if (INTEL_GEN(dev_priv) >= 9) {
   17214 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
   17215 		dev_priv->display.get_initial_plane_config =
   17216 			skl_get_initial_plane_config;
   17217 		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
   17218 		dev_priv->display.crtc_enable = hsw_crtc_enable;
   17219 		dev_priv->display.crtc_disable = hsw_crtc_disable;
   17220 	} else if (HAS_DDI(dev_priv)) {
   17221 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
   17222 		dev_priv->display.get_initial_plane_config =
   17223 			i9xx_get_initial_plane_config;
   17224 		dev_priv->display.crtc_compute_clock =
   17225 			hsw_crtc_compute_clock;
   17226 		dev_priv->display.crtc_enable = hsw_crtc_enable;
   17227 		dev_priv->display.crtc_disable = hsw_crtc_disable;
   17228 	} else if (HAS_PCH_SPLIT(dev_priv)) {
   17229 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
   17230 		dev_priv->display.get_initial_plane_config =
   17231 			i9xx_get_initial_plane_config;
   17232 		dev_priv->display.crtc_compute_clock =
   17233 			ilk_crtc_compute_clock;
   17234 		dev_priv->display.crtc_enable = ilk_crtc_enable;
   17235 		dev_priv->display.crtc_disable = ilk_crtc_disable;
   17236 	} else if (IS_CHERRYVIEW(dev_priv)) {
   17237 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17238 		dev_priv->display.get_initial_plane_config =
   17239 			i9xx_get_initial_plane_config;
   17240 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
   17241 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
   17242 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17243 	} else if (IS_VALLEYVIEW(dev_priv)) {
   17244 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17245 		dev_priv->display.get_initial_plane_config =
   17246 			i9xx_get_initial_plane_config;
   17247 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
   17248 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
   17249 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17250 	} else if (IS_G4X(dev_priv)) {
   17251 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17252 		dev_priv->display.get_initial_plane_config =
   17253 			i9xx_get_initial_plane_config;
   17254 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
   17255 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17256 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17257 	} else if (IS_PINEVIEW(dev_priv)) {
   17258 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17259 		dev_priv->display.get_initial_plane_config =
   17260 			i9xx_get_initial_plane_config;
   17261 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
   17262 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17263 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17264 	} else if (!IS_GEN(dev_priv, 2)) {
   17265 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17266 		dev_priv->display.get_initial_plane_config =
   17267 			i9xx_get_initial_plane_config;
   17268 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
   17269 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17270 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17271 	} else {
   17272 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
   17273 		dev_priv->display.get_initial_plane_config =
   17274 			i9xx_get_initial_plane_config;
   17275 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
   17276 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
   17277 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
   17278 	}
   17279 
   17280 	if (IS_GEN(dev_priv, 5)) {
   17281 		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
   17282 	} else if (IS_GEN(dev_priv, 6)) {
   17283 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
   17284 	} else if (IS_IVYBRIDGE(dev_priv)) {
   17285 		/* FIXME: detect B0+ stepping and use auto training */
   17286 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
   17287 	}
   17288 
   17289 	if (INTEL_GEN(dev_priv) >= 9)
   17290 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
   17291 	else
   17292 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
   17293 
   17294 }
   17295 
/*
 * Read out the current hardware cdclk state and make the software
 * bookkeeping (logical/actual) match what the hardware reports.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	/* Start with logical == actual == the hw state just read out. */
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
   17302 
   17303 /*
   17304  * Calculate what we think the watermarks should be for the state we've read
   17305  * out of the hardware and then immediately program those watermarks so that
   17306  * we ensure the hardware settings match our internal state.
   17307  *
   17308  * We can calculate what we think WM's should be by creating a duplicate of the
   17309  * current state (which was constructed during hardware readout) and running it
   17310  * through the atomic check code to calculate new watermark values in the
   17311  * state object.
   17312  */
   17313 static void sanitize_watermarks(struct drm_device *dev)
   17314 {
   17315 	struct drm_i915_private *dev_priv = to_i915(dev);
   17316 	struct drm_atomic_state *state;
   17317 	struct intel_atomic_state *intel_state;
   17318 	struct intel_crtc *crtc;
   17319 	struct intel_crtc_state *crtc_state;
   17320 	struct drm_modeset_acquire_ctx ctx;
   17321 	int ret;
   17322 	int i;
   17323 
   17324 	/* Only supported on platforms that use atomic watermark design */
   17325 	if (!dev_priv->display.optimize_watermarks)
   17326 		return;
   17327 
   17328 	/*
   17329 	 * We need to hold connection_mutex before calling duplicate_state so
   17330 	 * that the connector loop is protected.
   17331 	 */
   17332 	drm_modeset_acquire_init(&ctx, 0);
   17333 retry:
   17334 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
   17335 	if (ret == -EDEADLK) {
   17336 		drm_modeset_backoff(&ctx);
   17337 		goto retry;
   17338 	} else if (WARN_ON(ret)) {
   17339 		goto fail;
   17340 	}
   17341 
   17342 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
   17343 	if (WARN_ON(IS_ERR(state)))
   17344 		goto fail;
   17345 
   17346 	intel_state = to_intel_atomic_state(state);
   17347 
   17348 	/*
   17349 	 * Hardware readout is the only time we don't want to calculate
   17350 	 * intermediate watermarks (since we don't trust the current
   17351 	 * watermarks).
   17352 	 */
   17353 	if (!HAS_GMCH(dev_priv))
   17354 		intel_state->skip_intermediate_wm = true;
   17355 
   17356 	ret = intel_atomic_check(dev, state);
   17357 	if (ret) {
   17358 		/*
   17359 		 * If we fail here, it means that the hardware appears to be
   17360 		 * programmed in a way that shouldn't be possible, given our
   17361 		 * understanding of watermark requirements.  This might mean a
   17362 		 * mistake in the hardware readout code or a mistake in the
   17363 		 * watermark calculations for a given platform.  Raise a WARN
   17364 		 * so that this is noticeable.
   17365 		 *
   17366 		 * If this actually happens, we'll have to just leave the
   17367 		 * BIOS-programmed watermarks untouched and hope for the best.
   17368 		 */
   17369 		WARN(true, "Could not determine valid watermarks for inherited state\n");
   17370 		goto put_state;
   17371 	}
   17372 
   17373 	/* Write calculated watermark values back */
   17374 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
   17375 		crtc_state->wm.need_postvbl_update = true;
   17376 		dev_priv->display.optimize_watermarks(intel_state, crtc);
   17377 
   17378 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
   17379 	}
   17380 
   17381 put_state:
   17382 	drm_atomic_state_put(state);
   17383 fail:
   17384 	drm_modeset_drop_locks(&ctx);
   17385 	drm_modeset_acquire_fini(&ctx);
   17386 }
   17387 
   17388 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
   17389 {
   17390 	if (IS_GEN(dev_priv, 5)) {
   17391 		u32 fdi_pll_clk =
   17392 			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
   17393 
   17394 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
   17395 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
   17396 		dev_priv->fdi_pll_freq = 270000;
   17397 	} else {
   17398 		return;
   17399 	}
   17400 
   17401 	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
   17402 }
   17403 
/*
 * Commit the state read out from hardware back through the atomic
 * machinery once at init time, so every active plane/crtc has fully
 * computed software state before the first userspace modeset.
 *
 * Returns 0 on success or a negative errno from state allocation,
 * state duplication, or the commit itself.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, clear the partial state and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
   17478 
   17479 static void intel_mode_config_init(struct drm_i915_private *i915)
   17480 {
   17481 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
   17482 
   17483 	drm_mode_config_init(&i915->drm);
   17484 
   17485 	mode_config->min_width = 0;
   17486 	mode_config->min_height = 0;
   17487 
   17488 	mode_config->preferred_depth = 24;
   17489 	mode_config->prefer_shadow = 1;
   17490 
   17491 	mode_config->allow_fb_modifiers = true;
   17492 
   17493 	mode_config->funcs = &intel_mode_funcs;
   17494 
   17495 	/*
   17496 	 * Maximum framebuffer dimensions, chosen to match
   17497 	 * the maximum render engine surface size on gen4+.
   17498 	 */
   17499 	if (INTEL_GEN(i915) >= 7) {
   17500 		mode_config->max_width = 16384;
   17501 		mode_config->max_height = 16384;
   17502 	} else if (INTEL_GEN(i915) >= 4) {
   17503 		mode_config->max_width = 8192;
   17504 		mode_config->max_height = 8192;
   17505 	} else if (IS_GEN(i915, 3)) {
   17506 		mode_config->max_width = 4096;
   17507 		mode_config->max_height = 4096;
   17508 	} else {
   17509 		mode_config->max_width = 2048;
   17510 		mode_config->max_height = 2048;
   17511 	}
   17512 
   17513 	if (IS_I845G(i915) || IS_I865G(i915)) {
   17514 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
   17515 		mode_config->cursor_height = 1023;
   17516 	} else if (IS_GEN(i915, 2)) {
   17517 		mode_config->cursor_width = 64;
   17518 		mode_config->cursor_height = 64;
   17519 	} else {
   17520 		mode_config->cursor_width = 256;
   17521 		mode_config->cursor_height = 256;
   17522 	}
   17523 }
   17524 
/*
 * One-time display initialization: allocates the modeset workqueues,
 * sets up mode_config, bandwidth/quirk/FBC/PM/GMBUS state, creates a
 * CRTC per pipe, reads out the BIOS-programmed hardware state, and
 * performs the initial sanitizing commit.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/*
	 * NOTE(review): the workqueue allocations are not checked for
	 * NULL here; later users would oops on allocation failure —
	 * confirm whether upstream added checks worth backporting.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create one CRTC per hardware pipe, if the display is usable. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize the state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
   17631 
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 timing (quirk
 * workaround). Programs the DPLL dividers, pipe timings, and PIPECONF
 * directly, then waits for the scanline to start moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the fixed dividers against the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Hardcoded 640x480@60 CRT timings (registers take value-1). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
   17701 
/*
 * Counterpart to i830_enable_pipe(): turn the quirk-enabled pipe back
 * off. All planes and cursors are expected to be disabled already
 * (WARNed on below), then PIPECONF is cleared, the scanline is waited
 * out, and the DPLL is parked in VGA-mode-disable state.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Nothing should still be scanning out of this pipe. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
   17723 
/*
 * On pre-gen4 hardware a primary plane can be attached to either pipe.
 * If the BIOS left a primary plane scanning out on a pipe other than
 * the one its CRTC owns, disable that plane so our plane<->pipe
 * mapping assumptions hold.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Gen4+ primary planes are fixed to their pipe; nothing to fix. */
	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		/* Disable it on the pipe it's actually attached to. */
		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
   17751 
/* Return true if any encoder is currently attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Returning from the first iteration == "at least one exists". */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
   17762 
/*
 * Return the first connector attached to @encoder, or NULL if none.
 * Callers only need any one connector, hence the first-hit return.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
   17773 
   17774 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
   17775 			      enum pipe pch_transcoder)
   17776 {
   17777 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
   17778 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
   17779 }
   17780 
/*
 * Reset the frame start delay to 0 on both the CPU transcoder and (if
 * present) the PCH transcoder. The BIOS may leave a non-zero debug
 * delay programmed; we always run with delay 0.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: delay lives in the per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = I915_READ(reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* Older platforms: delay field is in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* CPT and later PCHs use the TRANS_CHICKEN2 register. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}
}
   17831 
/*
 * Bring a CRTC's BIOS-inherited state in line with the driver's
 * expectations: clear debug frame start delays, disable non-primary
 * planes and background color, turn the pipe off if it has no
 * encoders, and initialize FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
   17898 
   17899 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
   17900 {
   17901 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   17902 
   17903 	/*
   17904 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
   17905 	 * the hardware when a high res displays plugged in. DPLL P
   17906 	 * divider is zero, and the pipe timings are bonkers. We'll
   17907 	 * try to disable everything in that case.
   17908 	 *
   17909 	 * FIXME would be nice to be able to sanitize this state
   17910 	 * without several WARNs, but for now let's take the easy
   17911 	 * road.
   17912 	 */
   17913 	return IS_GEN(dev_priv, 6) &&
   17914 		crtc_state->hw.active &&
   17915 		crtc_state->shared_dpll &&
   17916 		crtc_state->port_clock == 0;
   17917 }
   17918 
/*
 * Fix up an encoder whose readout state is inconsistent (active
 * connector but no active pipe): manually run the encoder disable
 * hooks, detach it from its crtc, and clamp the connector to off.
 * Finally notify opregion and (on gen11+) sanitize the PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a known-bogus SNB BIOS DPLL config as "pipe not active". */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* Restore the saved best_encoder after the hooks ran. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
   17985 
   17986 /* FIXME read out full plane state for all planes */
   17987 static void readout_plane_state(struct drm_i915_private *dev_priv)
   17988 {
   17989 	struct intel_plane *plane;
   17990 	struct intel_crtc *crtc;
   17991 
   17992 	for_each_intel_plane(&dev_priv->drm, plane) {
   17993 		struct intel_plane_state *plane_state =
   17994 			to_intel_plane_state(plane->base.state);
   17995 		struct intel_crtc_state *crtc_state;
   17996 		enum pipe pipe = PIPE_A;
   17997 		bool visible;
   17998 
   17999 		visible = plane->get_hw_state(plane, &pipe);
   18000 
   18001 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   18002 		crtc_state = to_intel_crtc_state(crtc->base.state);
   18003 
   18004 		intel_set_plane_visible(crtc_state, plane_state, visible);
   18005 
   18006 		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
   18007 			      plane->base.base.id, plane->base.name,
   18008 			      enableddisabled(visible), pipe_name(pipe));
   18009 	}
   18010 
   18011 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   18012 		struct intel_crtc_state *crtc_state =
   18013 			to_intel_crtc_state(crtc->base.state);
   18014 
   18015 		fixup_active_planes(crtc_state);
   18016 	}
   18017 }
   18018 
/*
 * Read the current display hardware state (crtcs, planes, shared DPLLs,
 * encoders and connectors) back into the atomic software state, then
 * derive the initial cdclk, voltage-level and bandwidth bookkeeping
 * from it. The ordering of the readout passes below matters: planes
 * need the crtc state, encoders need the crtc state, and connectors
 * need the encoder state.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	/* Pass 1: reset each crtc state and read back pipe enable/active. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Throw away any stale state before refilling from HW. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	/* Pass 2: plane visibility (needs the crtc states from pass 1). */
	readout_plane_state(dev_priv);

	/* Pass 3: shared DPLL on/off state and which crtcs use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/*
		 * On EHL, DPLL4 being on implies a power domain reference
		 * that must be tracked so it can be dropped symmetrically.
		 */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Pass 4: encoder enable state and which pipe each one drives. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			/* Fill in the encoder-specific bits of crtc state. */
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Pass 5: connector state, derived from the encoders read above. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Pass 6: derive modes, pixel rate, cdclk/voltage/bandwidth state. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
   18218 
   18219 static void
   18220 get_encoder_power_domains(struct drm_i915_private *dev_priv)
   18221 {
   18222 	struct intel_encoder *encoder;
   18223 
   18224 	for_each_intel_encoder(&dev_priv->drm, encoder) {
   18225 		struct intel_crtc_state *crtc_state;
   18226 
   18227 		if (!encoder->get_power_domains)
   18228 			continue;
   18229 
   18230 		/*
   18231 		 * MST-primary and inactive encoders don't have a crtc state
   18232 		 * and neither of these require any power domain references.
   18233 		 */
   18234 		if (!encoder->base.crtc)
   18235 			continue;
   18236 
   18237 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
   18238 		encoder->get_power_domains(encoder, crtc_state);
   18239 	}
   18240 }
   18241 
   18242 static void intel_early_display_was(struct drm_i915_private *dev_priv)
   18243 {
   18244 	/*
   18245 	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
   18246 	 * Also known as Wa_14010480278.
   18247 	 */
   18248 	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
   18249 		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
   18250 			   DARBF_GATING_DIS);
   18251 
   18252 	if (IS_HASWELL(dev_priv)) {
   18253 		/*
   18254 		 * WaRsPkgCStateDisplayPMReq:hsw
   18255 		 * System hang if this isn't done before disabling all planes!
   18256 		 */
   18257 		I915_WRITE(CHICKEN_PAR1_1,
   18258 			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
   18259 	}
   18260 }
   18261 
   18262 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
   18263 				       enum port port, i915_reg_t hdmi_reg)
   18264 {
   18265 	u32 val = I915_READ(hdmi_reg);
   18266 
   18267 	if (val & SDVO_ENABLE ||
   18268 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
   18269 		return;
   18270 
   18271 	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
   18272 		      port_name(port));
   18273 
   18274 	val &= ~SDVO_PIPE_SEL_MASK;
   18275 	val |= SDVO_PIPE_SEL(PIPE_A);
   18276 
   18277 	I915_WRITE(hdmi_reg, val);
   18278 }
   18279 
   18280 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
   18281 				     enum port port, i915_reg_t dp_reg)
   18282 {
   18283 	u32 val = I915_READ(dp_reg);
   18284 
   18285 	if (val & DP_PORT_EN ||
   18286 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
   18287 		return;
   18288 
   18289 	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
   18290 		      port_name(port));
   18291 
   18292 	val &= ~DP_PIPE_SEL_MASK;
   18293 	val |= DP_PIPE_SEL(PIPE_A);
   18294 
   18295 	I915_WRITE(dp_reg, val);
   18296 }
   18297 
   18298 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
   18299 {
   18300 	/*
   18301 	 * The BIOS may select transcoder B on some of the PCH
   18302 	 * ports even it doesn't enable the port. This would trip
   18303 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
   18304 	 * Sanitize the transcoder select bits to prevent that. We
   18305 	 * assume that the BIOS never actually enabled the port,
   18306 	 * because if it did we'd actually have to toggle the port
   18307 	 * on and back off to make the transcoder A select stick
   18308 	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
   18309 	 * intel_disable_sdvo()).
   18310 	 */
   18311 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
   18312 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
   18313 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
   18314 
   18315 	/* PCH SDVOB multiplex with HDMIB */
   18316 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
   18317 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
   18318 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
   18319 }
   18320 
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state: read everything back via intel_modeset_readout_hw_state(), then
 * clamp any inconsistent encoder/crtc/plane/PLL state to something the
 * driver can take over from. Runs at driver load and on resume, with the
 * INIT power domain held for the duration.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	/* Disable encoders whose readout state is inconsistent. */
	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but no longer referenced. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back (and where supported, sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * After sanitizing, no crtc should still need extra power domains;
	 * warn and drop any that modeset_get_crtc_power_domains() reports.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
   18422 
   18423 void intel_display_resume(struct drm_device *dev)
   18424 {
   18425 	struct drm_i915_private *dev_priv = to_i915(dev);
   18426 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
   18427 	struct drm_modeset_acquire_ctx ctx;
   18428 	int ret;
   18429 
   18430 	dev_priv->modeset_restore_state = NULL;
   18431 	if (state)
   18432 		state->acquire_ctx = &ctx;
   18433 
   18434 	drm_modeset_acquire_init(&ctx, 0);
   18435 
   18436 	while (1) {
   18437 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
   18438 		if (ret != -EDEADLK)
   18439 			break;
   18440 
   18441 		drm_modeset_backoff(&ctx);
   18442 	}
   18443 
   18444 	if (!ret)
   18445 		ret = __intel_display_resume(dev, state, &ctx);
   18446 
   18447 	intel_enable_ipc(dev_priv);
   18448 	drm_modeset_drop_locks(&ctx);
   18449 	drm_modeset_acquire_fini(&ctx);
   18450 
   18451 	if (ret)
   18452 		DRM_ERROR("Restoring old state failed with %i\n", ret);
   18453 	if (state)
   18454 		drm_atomic_state_put(state);
   18455 }
   18456 
   18457 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
   18458 {
   18459 	struct intel_connector *connector;
   18460 	struct drm_connector_list_iter conn_iter;
   18461 
   18462 	/* Kill all the work that may have been queued by hpd. */
   18463 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
   18464 	for_each_intel_connector_iter(connector, &conn_iter) {
   18465 		if (connector->modeset_retry_work.func)
   18466 			cancel_work_sync(&connector->modeset_retry_work);
   18467 		if (connector->hdcp.shim) {
   18468 			cancel_delayed_work_sync(&connector->hdcp.check_work);
   18469 			cancel_work_sync(&connector->hdcp.prop_work);
   18470 		}
   18471 	}
   18472 	drm_connector_list_iter_end(&conn_iter);
   18473 }
   18474 
/*
 * Tear down all display/modeset state on driver removal. The ordering
 * below is deliberate: in-flight work is drained first, then interrupts
 * and polling are killed, then the various subsystems are dismantled
 * from the outside in, finishing with the workqueues themselves.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain any pending flips/modesets before pulling the rug out. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	intel_bw_cleanup(i915);

	/* Nothing may queue work past this point; drop the queues. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
   18528 
   18529 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
   18530 
/*
 * Snapshot of key display register state, captured by
 * intel_display_capture_error_state() for the GPU error dump and
 * printed by intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2 on HSW/BDW only */
	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		/* NOTE(review): never filled in by the capture path here
		 * and not printed — confirm before relying on it. */
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below valid only if true */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT, GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE, gen <= 3 */
		u32 pos;		/* DSPPOS, gen <= 3 */
		u32 addr;		/* DSPADDR, gen <= 7 && !HSW */
		u32 surface;		/* DSPSURF, gen >= 4 */
		u32 tile_offset;	/* DSPTILEOFF, gen >= 4 */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* registers below valid only if true */
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		/* Timing registers for this transcoder. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];	/* A, B, C, D, EDP — see capture function */
};
   18573 
/*
 * Capture a snapshot of display register state for the error dump.
 * May run in atomic context (hence GFP_ATOMIC). Returns NULL if the
 * device has no (enabled) display or the allocation fails; otherwise
 * the caller owns the returned buffer and must kfree() it.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* The list above must stay in sync with error->transcoder[]. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip pipes whose power domain is off: reads would be bogus. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		/* Skip transcoders that don't exist on this platform. */
		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
   18655 
/* Shorthand for printing into the error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
   18657 
/*
 * Format a previously captured intel_display_error_state into the
 * error-state buffer @m. Safe to call with @error == NULL (no-op).
 * Only prints register groups that were actually captured (power
 * domain on, transcoder available).
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Gen gates mirror those used at capture time. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
   18716 
   18717 #endif
   18718