      1 /*	$NetBSD: intel_pm.c,v 1.26 2021/12/18 23:45:28 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright © 2012 Intel Corporation
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23  * IN THE SOFTWARE.
     24  *
     25  * Authors:
     26  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
     27  *
     28  */
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: intel_pm.c,v 1.26 2021/12/18 23:45:28 riastradh Exp $");
     32 
     33 #include <linux/module.h>
     34 #include <linux/pm_runtime.h>
     35 
     36 #include <drm/drm_atomic_helper.h>
     37 #include <drm/drm_fourcc.h>
     38 #include <drm/drm_plane_helper.h>
     39 
     40 #include "display/intel_atomic.h"
     41 #include "display/intel_display_types.h"
     42 #include "display/intel_fbc.h"
     43 #include "display/intel_sprite.h"
     44 
     45 #include "gt/intel_llc.h"
     46 
     47 #include "i915_drv.h"
     48 #include "i915_irq.h"
     49 #include "i915_trace.h"
     50 #include "intel_pm.h"
     51 #include "intel_sideband.h"
     52 #ifndef __NetBSD__
     53 #include "../../../platform/x86/intel_ips.h"
     54 #endif
     55 
     56 #include <linux/nbsd-namespace.h>
     57 
     58 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
     59 {
     60 	if (HAS_LLC(dev_priv)) {
     61 		/*
     62 		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
     63 		 * Display WA #0390: skl,kbl
     64 		 *
     65 		 * Must match Sampler, Pixel Back End, and Media. See
     66 		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
     67 		 */
     68 		I915_WRITE(CHICKEN_PAR1_1,
     69 			   I915_READ(CHICKEN_PAR1_1) |
     70 			   SKL_DE_COMPRESSED_HASH_MODE);
     71 	}
     72 
     73 	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
     74 	I915_WRITE(CHICKEN_PAR1_1,
     75 		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
     76 
     77 	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
     78 	I915_WRITE(GEN8_CHICKEN_DCPR_1,
     79 		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
     80 
     81 	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
     82 	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
     83 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
     84 		   DISP_FBC_WM_DIS |
     85 		   DISP_FBC_MEMORY_WAKE);
     86 
     87 	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
     88 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
     89 		   ILK_DPFC_DISABLE_DUMMY0);
     90 
     91 	if (IS_SKYLAKE(dev_priv)) {
     92 		/* WaDisableDopClockGating */
     93 		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
     94 			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
     95 	}
     96 }
     97 
     98 static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
     99 {
    100 	gen9_init_clock_gating(dev_priv);
    101 
    102 	/* WaDisableSDEUnitClockGating:bxt */
    103 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
    104 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
    105 
    106 	/*
    107 	 * FIXME:
    108 	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
    109 	 */
    110 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
    111 		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
    112 
    113 	/*
    114 	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
    115 	 * to stay fully on.
    116 	 */
    117 	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
    118 		   PWM1_GATING_DIS | PWM2_GATING_DIS);
    119 
    120 	/*
    121 	 * Lower the display internal timeout.
    122 	 * This is needed to avoid any hard hangs when the DSI port PLL
    123 	 * is off and an MMIO access is attempted by any privileged
    124 	 * application, using batch buffers or any other means.
    125 	 */
    126 	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
    127 }
    128 
    129 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
    130 {
    131 	gen9_init_clock_gating(dev_priv);
    132 
    133 	/*
    134 	 * WaDisablePWMClockGating:glk
    135 	 * Backlight PWM may stop in the asserted state, causing backlight
    136 	 * to stay fully on.
    137 	 */
    138 	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
    139 		   PWM1_GATING_DIS | PWM2_GATING_DIS);
    140 
    141 	/* WaDDIIOTimeout:glk */
    142 	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
    143 		u32 val = I915_READ(CHICKEN_MISC_2);
    144 		val &= ~(GLK_CL0_PWR_DOWN |
    145 			 GLK_CL1_PWR_DOWN |
    146 			 GLK_CL2_PWR_DOWN);
    147 		I915_WRITE(CHICKEN_MISC_2, val);
    148 	}
    149 
    150 }
    151 
    152 static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
    153 {
    154 	u32 tmp;
    155 
    156 	tmp = I915_READ(CLKCFG);
    157 
    158 	switch (tmp & CLKCFG_FSB_MASK) {
    159 	case CLKCFG_FSB_533:
    160 		dev_priv->fsb_freq = 533; /* 133*4 */
    161 		break;
    162 	case CLKCFG_FSB_800:
    163 		dev_priv->fsb_freq = 800; /* 200*4 */
    164 		break;
    165 	case CLKCFG_FSB_667:
    166 		dev_priv->fsb_freq = 667; /* 167*4 */
    167 		break;
    168 	case CLKCFG_FSB_400:
    169 		dev_priv->fsb_freq = 400; /* 100*4 */
    170 		break;
    171 	}
    172 
    173 	switch (tmp & CLKCFG_MEM_MASK) {
    174 	case CLKCFG_MEM_533:
    175 		dev_priv->mem_freq = 533;
    176 		break;
    177 	case CLKCFG_MEM_667:
    178 		dev_priv->mem_freq = 667;
    179 		break;
    180 	case CLKCFG_MEM_800:
    181 		dev_priv->mem_freq = 800;
    182 		break;
    183 	}
    184 
    185 	/* detect pineview DDR3 setting */
    186 	tmp = I915_READ(CSHRDDR3CTL);
    187 	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
    188 }
    189 
    190 static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
    191 {
    192 	u16 ddrpll, csipll;
    193 
    194 	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
    195 	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
    196 
    197 	switch (ddrpll & 0xff) {
    198 	case 0xc:
    199 		dev_priv->mem_freq = 800;
    200 		break;
    201 	case 0x10:
    202 		dev_priv->mem_freq = 1066;
    203 		break;
    204 	case 0x14:
    205 		dev_priv->mem_freq = 1333;
    206 		break;
    207 	case 0x18:
    208 		dev_priv->mem_freq = 1600;
    209 		break;
    210 	default:
    211 		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
    212 			ddrpll & 0xff);
    213 		dev_priv->mem_freq = 0;
    214 		break;
    215 	}
    216 
    217 	switch (csipll & 0x3ff) {
    218 	case 0x00c:
    219 		dev_priv->fsb_freq = 3200;
    220 		break;
    221 	case 0x00e:
    222 		dev_priv->fsb_freq = 3733;
    223 		break;
    224 	case 0x010:
    225 		dev_priv->fsb_freq = 4266;
    226 		break;
    227 	case 0x012:
    228 		dev_priv->fsb_freq = 4800;
    229 		break;
    230 	case 0x014:
    231 		dev_priv->fsb_freq = 5333;
    232 		break;
    233 	case 0x016:
    234 		dev_priv->fsb_freq = 5866;
    235 		break;
    236 	case 0x018:
    237 		dev_priv->fsb_freq = 6400;
    238 		break;
    239 	default:
    240 		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
    241 			csipll & 0x3ff);
    242 		dev_priv->fsb_freq = 0;
    243 		break;
    244 	}
    245 }
    246 
    247 static const struct cxsr_latency cxsr_latency_table[] = {
    248 	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    249 	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    250 	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    251 	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    252 	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
    253 
    254 	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    255 	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    256 	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    257 	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    258 	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
    259 
    260 	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    261 	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    262 	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    263 	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    264 	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
    265 
    266 	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    267 	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    268 	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    269 	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    270 	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
    271 
    272 	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    273 	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    274 	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    275 	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    276 	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
    277 
    278 	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    279 	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    280 	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    281 	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    282 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
    283 };
    284 
    285 static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
    286 							 bool is_ddr3,
    287 							 int fsb,
    288 							 int mem)
    289 {
    290 	const struct cxsr_latency *latency;
    291 	int i;
    292 
    293 	if (fsb == 0 || mem == 0)
    294 		return NULL;
    295 
    296 	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
    297 		latency = &cxsr_latency_table[i];
    298 		if (is_desktop == latency->is_desktop &&
    299 		    is_ddr3 == latency->is_ddr3 &&
    300 		    fsb == latency->fsb_freq && mem == latency->mem_freq)
    301 			return latency;
    302 	}
    303 
    304 	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
    305 
    306 	return NULL;
    307 }
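
/*
 * A usage sketch (illustrative, not actual driver flow): a desktop
 * (non-mobile), non-DDR3 system with fsb_freq 800 and mem_freq 667
 * matches the {1, 0, 800, 667, ...} row of cxsr_latency_table above.
 */
#if 0
	const struct cxsr_latency *lat =
		intel_get_cxsr_latency(true, false, 800, 667);
	/* lat is non-NULL here and lat->display_sr == 3354 for this row */
#endif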
    308 
    309 static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
    310 {
    311 	u32 val;
    312 
    313 	vlv_punit_get(dev_priv);
    314 
    315 	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
    316 	if (enable)
    317 		val &= ~FORCE_DDR_HIGH_FREQ;
    318 	else
    319 		val |= FORCE_DDR_HIGH_FREQ;
    320 	val &= ~FORCE_DDR_LOW_FREQ;
    321 	val |= FORCE_DDR_FREQ_REQ_ACK;
    322 	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
    323 
    324 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
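	/*
	 * The Punit signals completion by clearing the ACK bit; poll
	 * for that for up to 3 ms before complaining.
	 */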
    325 		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
    326 		drm_err(&dev_priv->drm,
    327 			"timed out waiting for Punit DDR DVFS request\n");
    328 
    329 	vlv_punit_put(dev_priv);
    330 }
    331 
    332 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
    333 {
    334 	u32 val;
    335 
    336 	vlv_punit_get(dev_priv);
    337 
    338 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
    339 	if (enable)
    340 		val |= DSP_MAXFIFO_PM5_ENABLE;
    341 	else
    342 		val &= ~DSP_MAXFIFO_PM5_ENABLE;
    343 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
    344 
    345 	vlv_punit_put(dev_priv);
    346 }
    347 
    348 #define FW_WM(value, plane) \
    349 	(((u32)(value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
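
/*
 * A minimal sketch of what FW_WM composes (field positions assumed for
 * illustration; the real DSPFW_*_SHIFT/_MASK values live in i915_reg.h):
 * if the SR field sat at bits 31:23, FW_WM(0x2a, SR) would expand to
 * (((u32)0x2a << 23) & (0x1ff << 23)), i.e. the watermark shifted into
 * its field with any overflowing bits masked off.
 */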
    350 
    351 static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
    352 {
    353 	bool was_enabled;
    354 	u32 val;
    355 
    356 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
    357 		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
    358 		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
    359 		POSTING_READ(FW_BLC_SELF_VLV);
    360 	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
    361 		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
    362 		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
    363 		POSTING_READ(FW_BLC_SELF);
    364 	} else if (IS_PINEVIEW(dev_priv)) {
    365 		val = I915_READ(DSPFW3);
    366 		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
    367 		if (enable)
    368 			val |= PINEVIEW_SELF_REFRESH_EN;
    369 		else
    370 			val &= ~PINEVIEW_SELF_REFRESH_EN;
    371 		I915_WRITE(DSPFW3, val);
    372 		POSTING_READ(DSPFW3);
    373 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
    374 		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
    375 		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
    376 			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
    377 		I915_WRITE(FW_BLC_SELF, val);
    378 		POSTING_READ(FW_BLC_SELF);
    379 	} else if (IS_I915GM(dev_priv)) {
    380 		/*
    381 		 * FIXME can't find a bit like this for 915G, and
    382 		 * yet it does have the related watermark in
    383 		 * FW_BLC_SELF. What's going on?
    384 		 */
    385 		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
    386 		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
    387 			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
    388 		I915_WRITE(INSTPM, val);
    389 		POSTING_READ(INSTPM);
    390 	} else {
    391 		return false;
    392 	}
    393 
    394 	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
    395 
    396 	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
    397 		    enableddisabled(enable),
    398 		    enableddisabled(was_enabled));
    399 
    400 	return was_enabled;
    401 }
    402 
    403 /**
    404  * intel_set_memory_cxsr - Configure CxSR state
    405  * @dev_priv: i915 device
    406  * @enable: Allow vs. disallow CxSR
    407  *
    408  * Allow or disallow the system to enter a special CxSR
    409  * (C-state self refresh) state. What typically happens in CxSR mode
    410  * is that several display FIFOs may get combined into a single larger
    411  * FIFO for a particular plane (so called max FIFO mode) to allow the
    412  * system to defer memory fetches longer, and the memory will enter
    413  * self refresh.
    414  *
    415  * Note that enabling CxSR does not guarantee that the system enters
    416  * this special mode, nor does it guarantee that the system stays
    417  * in that mode once entered. So this just allows/disallows the system
    418  * to autonomously utilize the CxSR mode. Other factors such as core
    419  * C-states will affect when/if the system actually enters/exits the
    420  * CxSR mode.
    421  *
    422  * Note that on VLV/CHV this actually only controls the max FIFO mode,
    423  * and the system is free to enter/exit memory self refresh at any time
    424  * even when the use of CxSR has been disallowed.
    425  *
    426  * While the system is actually in the CxSR/max FIFO mode, some plane
    427  * control registers will not get latched on vblank. Thus in order to
    428  * guarantee the system will respond to changes in the plane registers
    429  * we must always disallow CxSR prior to making changes to those registers.
    430  * Unfortunately the system will re-evaluate the CxSR conditions at
    431  * frame start which happens after vblank start (which is when the plane
    432  * registers would get latched), so we can't proceed with the plane update
    433  * during the same frame where we disallowed CxSR.
    434  *
    435  * Certain platforms also have a deeper HPLL SR mode. Fortunately the
    436  * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
    437  * the hardware w.r.t. HPLL SR when writing to plane registers.
    438  * Disallowing just CxSR is sufficient.
    439  */
    440 bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
    441 {
    442 	bool ret;
    443 
    444 	mutex_lock(&dev_priv->wm.wm_mutex);
    445 	ret = _intel_set_memory_cxsr(dev_priv, enable);
    446 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
    447 		dev_priv->wm.vlv.cxsr = enable;
    448 	else if (IS_G4X(dev_priv))
    449 		dev_priv->wm.g4x.cxsr = enable;
    450 	mutex_unlock(&dev_priv->wm.wm_mutex);
    451 
    452 	return ret;
    453 }
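
/*
 * A minimal usage sketch of the rule above (hypothetical caller, not
 * the actual driver flow): disallow CxSR, let a vblank pass so the
 * hardware has really left max FIFO mode, update the plane registers,
 * then re-allow CxSR.
 */
#if 0
	if (intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);
	/* ... write the plane registers ... */
	intel_set_memory_cxsr(dev_priv, true);
#endif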
    454 
    455 /*
    456  * Latency for FIFO fetches is dependent on several factors:
    457  *   - memory configuration (speed, channels)
    458  *   - chipset
    459  *   - current MCH state
    460  * It can be fairly high in some situations, so here we assume a fairly
    461  * pessimal value.  It's a tradeoff between extra memory fetches (if we
    462  * set this value too high, the FIFO will fetch frequently to stay full)
    463  * and power consumption (set it too low to save power and we might see
    464  * FIFO underruns and display "flicker").
    465  *
    466  * A value of 5us seems to be a good balance; safe for very low end
    467  * platforms but not overly aggressive on lower latency configs.
    468  */
    469 static const int pessimal_latency_ns = 5000;
    470 
    471 #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
    472 	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
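
/*
 * Worked example (register values assumed for illustration): pipe A's
 * sprite1 start point below uses lo_shift = 8, hi_shift = 4, so
 * VLV_FIFO_START(dsparb, dsparb2, 8, 4) takes DSPARB bits 15:8 as the
 * low byte and DSPARB2 bit 4 as bit 8 of a 9-bit start point; e.g.
 * dsparb = 0x00004000, dsparb2 = 0x10 -> (0x40 | (0x1 << 8)) = 320.
 */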
    473 
    474 static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
    475 {
    476 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
    477 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
    478 	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
    479 	enum pipe pipe = crtc->pipe;
    480 	int sprite0_start, sprite1_start;
    481 
    482 	switch (pipe) {
    483 		u32 dsparb, dsparb2, dsparb3;
    484 	case PIPE_A:
    485 		dsparb = I915_READ(DSPARB);
    486 		dsparb2 = I915_READ(DSPARB2);
    487 		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
    488 		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
    489 		break;
    490 	case PIPE_B:
    491 		dsparb = I915_READ(DSPARB);
    492 		dsparb2 = I915_READ(DSPARB2);
    493 		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
    494 		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
    495 		break;
    496 	case PIPE_C:
    497 		dsparb2 = I915_READ(DSPARB2);
    498 		dsparb3 = I915_READ(DSPARB3);
    499 		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
    500 		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
    501 		break;
    502 	default:
    503 		MISSING_CASE(pipe);
    504 		return;
    505 	}
    506 
    507 	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
    508 	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
    509 	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
    510 	fifo_state->plane[PLANE_CURSOR] = 63;
    511 }
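
/*
 * Worked example (start points assumed for illustration): with
 * sprite0_start = 256 and sprite1_start = 384, the plane FIFO splits
 * into primary = 256 entries, sprite0 = 384 - 256 = 128 entries and
 * sprite1 = 511 - 384 = 127 entries, plus the fixed 63-entry cursor
 * FIFO.
 */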
    512 
    513 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
    514 			      enum i9xx_plane_id i9xx_plane)
    515 {
    516 	u32 dsparb = I915_READ(DSPARB);
    517 	int size;
    518 
    519 	size = dsparb & 0x7f;
    520 	if (i9xx_plane == PLANE_B)
    521 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
    522 
    523 	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
    524 		    dsparb, plane_name(i9xx_plane), size);
    525 
    526 	return size;
    527 }
    528 
    529 static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
    530 			      enum i9xx_plane_id i9xx_plane)
    531 {
    532 	u32 dsparb = I915_READ(DSPARB);
    533 	int size;
    534 
    535 	size = dsparb & 0x1ff;
    536 	if (i9xx_plane == PLANE_B)
    537 		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
    538 	size >>= 1; /* Convert to cachelines */
    539 
    540 	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
    541 		    dsparb, plane_name(i9xx_plane), size);
    542 
    543 	return size;
    544 }
    545 
    546 static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
    547 			      enum i9xx_plane_id i9xx_plane)
    548 {
    549 	u32 dsparb = I915_READ(DSPARB);
    550 	int size;
    551 
    552 	size = dsparb & 0x7f;
    553 	size >>= 2; /* Convert to cachelines */
    554 
    555 	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
    556 		    dsparb, plane_name(i9xx_plane), size);
    557 
    558 	return size;
    559 }
    560 
    561 /* Pineview has different values for various configs */
    562 static const struct intel_watermark_params pnv_display_wm = {
    563 	.fifo_size = PINEVIEW_DISPLAY_FIFO,
    564 	.max_wm = PINEVIEW_MAX_WM,
    565 	.default_wm = PINEVIEW_DFT_WM,
    566 	.guard_size = PINEVIEW_GUARD_WM,
    567 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
    568 };
    569 
    570 static const struct intel_watermark_params pnv_display_hplloff_wm = {
    571 	.fifo_size = PINEVIEW_DISPLAY_FIFO,
    572 	.max_wm = PINEVIEW_MAX_WM,
    573 	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
    574 	.guard_size = PINEVIEW_GUARD_WM,
    575 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
    576 };
    577 
    578 static const struct intel_watermark_params pnv_cursor_wm = {
    579 	.fifo_size = PINEVIEW_CURSOR_FIFO,
    580 	.max_wm = PINEVIEW_CURSOR_MAX_WM,
    581 	.default_wm = PINEVIEW_CURSOR_DFT_WM,
    582 	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
    583 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
    584 };
    585 
    586 static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
    587 	.fifo_size = PINEVIEW_CURSOR_FIFO,
    588 	.max_wm = PINEVIEW_CURSOR_MAX_WM,
    589 	.default_wm = PINEVIEW_CURSOR_DFT_WM,
    590 	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
    591 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
    592 };
    593 
    594 static const struct intel_watermark_params i965_cursor_wm_info = {
    595 	.fifo_size = I965_CURSOR_FIFO,
    596 	.max_wm = I965_CURSOR_MAX_WM,
    597 	.default_wm = I965_CURSOR_DFT_WM,
    598 	.guard_size = 2,
    599 	.cacheline_size = I915_FIFO_LINE_SIZE,
    600 };
    601 
    602 static const struct intel_watermark_params i945_wm_info = {
    603 	.fifo_size = I945_FIFO_SIZE,
    604 	.max_wm = I915_MAX_WM,
    605 	.default_wm = 1,
    606 	.guard_size = 2,
    607 	.cacheline_size = I915_FIFO_LINE_SIZE,
    608 };
    609 
    610 static const struct intel_watermark_params i915_wm_info = {
    611 	.fifo_size = I915_FIFO_SIZE,
    612 	.max_wm = I915_MAX_WM,
    613 	.default_wm = 1,
    614 	.guard_size = 2,
    615 	.cacheline_size = I915_FIFO_LINE_SIZE,
    616 };
    617 
    618 static const struct intel_watermark_params i830_a_wm_info = {
    619 	.fifo_size = I855GM_FIFO_SIZE,
    620 	.max_wm = I915_MAX_WM,
    621 	.default_wm = 1,
    622 	.guard_size = 2,
    623 	.cacheline_size = I830_FIFO_LINE_SIZE,
    624 };
    625 
    626 static const struct intel_watermark_params i830_bc_wm_info = {
    627 	.fifo_size = I855GM_FIFO_SIZE,
    628 	.max_wm = I915_MAX_WM/2,
    629 	.default_wm = 1,
    630 	.guard_size = 2,
    631 	.cacheline_size = I830_FIFO_LINE_SIZE,
    632 };
    633 
    634 static const struct intel_watermark_params i845_wm_info = {
    635 	.fifo_size = I830_FIFO_SIZE,
    636 	.max_wm = I915_MAX_WM,
    637 	.default_wm = 1,
    638 	.guard_size = 2,
    639 	.cacheline_size = I830_FIFO_LINE_SIZE,
    640 };
    641 
    642 /**
    643  * intel_wm_method1 - Method 1 / "small buffer" watermark formula
    644  * @pixel_rate: Pipe pixel rate in kHz
    645  * @cpp: Plane bytes per pixel
    646  * @latency: Memory wakeup latency in 0.1us units
    647  *
    648  * Compute the watermark using the method 1 or "small buffer"
    649  * formula. The caller may additionally add extra cachelines
    650  * to account for TLB misses and clock crossings.
    651  *
    652  * This method is concerned with the short term drain rate
    653  * of the FIFO, i.e. it does not account for blanking periods
    654  * which would effectively reduce the average drain rate across
    655  * a longer period. The name "small" refers to the fact that the
    656  * FIFO is relatively small compared to the amount of data
    657  * fetched.
    658  *
    659  * The FIFO level vs. time graph might look something like:
    660  *
    661  *   |\   |\
    662  *   | \  | \
    663  * __---__---__ (- plane active, _ blanking)
    664  * -> time
    665  *
    666  * or perhaps like this:
    667  *
    668  *   |\|\  |\|\
    669  * __----__----__ (- plane active, _ blanking)
    670  * -> time
    671  *
    672  * Returns:
    673  * The watermark in bytes
    674  */
    675 static unsigned int intel_wm_method1(unsigned int pixel_rate,
    676 				     unsigned int cpp,
    677 				     unsigned int latency)
    678 {
    679 	u64 ret;
    680 
    681 	ret = mul_u32_u32(pixel_rate, cpp * latency);
    682 	ret = DIV_ROUND_UP_ULL(ret, 10000);
    683 
    684 	return ret;
    685 }
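
/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz
 * (1080p@60), cpp = 4 and latency = 50 (i.e. 5.0 us) give
 * DIV_ROUND_UP_ULL(148500 * 4 * 50, 10000) = 2970 bytes, so the FIFO
 * must buffer roughly 2.9 KiB to ride out the wakeup latency.
 */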
    686 
    687 /**
    688  * intel_wm_method2 - Method 2 / "large buffer" watermark formula
    689  * @pixel_rate: Pipe pixel rate in kHz
    690  * @htotal: Pipe horizontal total
    691  * @width: Plane width in pixels
    692  * @cpp: Plane bytes per pixel
    693  * @latency: Memory wakeup latency in 0.1us units
    694  *
    695  * Compute the watermark using the method 2 or "large buffer"
    696  * formula. The caller may additionally add extra cachelines
    697  * to account for TLB misses and clock crossings.
    698  *
    699  * This method is concerned with the long term drain rate
    700  * of the FIFO, i.e. it does account for blanking periods
    701  * which effectively reduce the average drain rate across
    702  * a longer period. The name "large" refers to the fact that the
    703  * FIFO is relatively large compared to the amount of data
    704  * fetched.
    705  *
    706  * The FIFO level vs. time graph might look something like:
    707  *
    708  *    |\___       |\___
    709  *    |    \___   |    \___
    710  *    |        \  |        \
    711  * __ --__--__--__--__--__--__ (- plane active, _ blanking)
    712  * -> time
    713  *
    714  * Returns:
    715  * The watermark in bytes
    716  */
    717 static unsigned int intel_wm_method2(unsigned int pixel_rate,
    718 				     unsigned int htotal,
    719 				     unsigned int width,
    720 				     unsigned int cpp,
    721 				     unsigned int latency)
    722 {
    723 	unsigned int ret;
    724 
    725 	/*
    726 	 * FIXME remove once all users are computing
    727 	 * watermarks in the correct place.
    728 	 */
    729 	if (WARN_ON_ONCE(htotal == 0))
    730 		htotal = 1;
    731 
    732 	ret = (latency * pixel_rate) / (htotal * 10000);
    733 	ret = (ret + 1) * width * cpp;
    734 
    735 	return ret;
    736 }
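
/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz,
 * htotal = 2200, width = 1920, cpp = 4 and latency = 50 (5.0 us):
 * (50 * 148500) / (2200 * 10000) = 0 whole lines elapse during the
 * latency, so ret = (0 + 1) * 1920 * 4 = 7680 bytes, one full line.
 */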
    737 
    738 /**
    739  * intel_calculate_wm - calculate watermark level
    740  * @pixel_rate: pixel clock
    741  * @wm: chip FIFO params
    742  * @fifo_size: size of the FIFO buffer
    743  * @cpp: bytes per pixel
    744  * @latency_ns: memory latency for the platform
    745  *
    746  * Calculate the watermark level (the level at which the display plane will
    747  * start fetching from memory again).  Each chip has a different display
    748  * FIFO size and allocation, so the caller needs to figure that out and pass
    749  * in the correct intel_watermark_params structure.
    750  *
    751  * As the pixel clock runs, the FIFO will be drained at a rate that depends
    752  * on the pixel size.  When it reaches the watermark level, it'll start
    753  * fetching FIFO-line-sized chunks from memory until the FIFO fills
    754  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
    755  * will occur, and a display engine hang could result.
    756  */
    757 static unsigned int intel_calculate_wm(int pixel_rate,
    758 				       const struct intel_watermark_params *wm,
    759 				       int fifo_size, int cpp,
    760 				       unsigned int latency_ns)
    761 {
    762 	int entries, wm_size;
    763 
    764 	/*
    765 	 * Note: we need to make sure we don't overflow for various clock &
    766 	 * latency values.
    767 	 * Clocks go from a few thousand to several hundred thousand kHz;
    768 	 * latency is usually a few thousand ns.
    769 	 */
    770 	entries = intel_wm_method1(pixel_rate, cpp,
    771 				   latency_ns / 100);
    772 	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
    773 		wm->guard_size;
    774 	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
    775 
    776 	wm_size = fifo_size - entries;
    777 	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
    778 
    779 	/* Don't promote wm_size to unsigned... */
    780 	if (wm_size > wm->max_wm)
    781 		wm_size = wm->max_wm;
    782 	if (wm_size <= 0)
    783 		wm_size = wm->default_wm;
    784 
    785 	/*
    786 	 * Bspec seems to indicate that the value shouldn't be lower than
    787 	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
    788 	 * Let's go for 8, which is the burst size, since certain platforms
    789 	 * already use a hardcoded 8 (which is what the spec says should be
    790 	 * done).
    791 	 */
    792 	if (wm_size <= 8)
    793 		wm_size = 8;
    794 
    795 	return wm_size;
    796 }
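
/*
 * Worked example (illustrative numbers; FIFO/cacheline sizes assumed,
 * cf. the Pineview parameters below): pixel_rate = 148500 kHz, cpp = 4
 * and latency_ns = 5000 make method 1 return 2970 bytes; with a 64-byte
 * cacheline and guard_size = 2 that is DIV_ROUND_UP(2970, 64) + 2 = 49
 * entries, so a 512-entry FIFO leaves a watermark level of
 * 512 - 49 = 463 (before any clamping against max_wm).
 */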
    797 
    798 static bool is_disabling(int old, int new, int threshold)
    799 {
    800 	return old >= threshold && new < threshold;
    801 }
    802 
    803 static bool is_enabling(int old, int new, int threshold)
    804 {
    805 	return old < threshold && new >= threshold;
    806 }
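
/*
 * These helpers detect threshold crossings; e.g. with boolean state,
 * is_disabling(old_wm->cxsr, new_wm.cxsr, true) is true only on an
 * enabled -> disabled transition (see g4x_program_watermarks()).
 */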
    807 
    808 static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
    809 {
    810 	return dev_priv->wm.max_level + 1;
    811 }
    812 
    813 static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
    814 				   const struct intel_plane_state *plane_state)
    815 {
    816 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
    817 
    818 	/* FIXME check the 'enable' instead */
    819 	if (!crtc_state->hw.active)
    820 		return false;
    821 
    822 	/*
    823 	 * Treat cursor with fb as always visible since cursor updates
    824 	 * can happen faster than the vrefresh rate, and the current
    825 	 * watermark code doesn't handle that correctly. Cursor updates
    826 	 * which set/clear the fb or change the cursor size are going
    827 	 * to get throttled by intel_legacy_cursor_update() to work
    828 	 * around this problem with the watermark code.
    829 	 */
    830 	if (plane->id == PLANE_CURSOR)
    831 		return plane_state->hw.fb != NULL;
    832 	else
    833 		return plane_state->uapi.visible;
    834 }
    835 
    836 static bool intel_crtc_active(struct intel_crtc *crtc)
    837 {
    838 	/* Be paranoid as we can arrive here with only partial
    839 	 * state retrieved from the hardware during setup.
    840 	 *
    841 	 * We can ditch the adjusted_mode.crtc_clock check as soon
    842 	 * as Haswell has gained clock readout/fastboot support.
    843 	 *
    844 	 * We can ditch the crtc->primary->state->fb check as soon as we can
    845 	 * properly reconstruct framebuffers.
    846 	 *
    847 	 * FIXME: The intel_crtc->active here should be switched to
    848 	 * crtc->state->active once we have proper CRTC states wired up
    849 	 * for atomic.
    850 	 */
    851 	return crtc->active && crtc->base.primary->state->fb &&
    852 		crtc->config->hw.adjusted_mode.crtc_clock;
    853 }
    854 
    855 static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
    856 {
    857 	struct intel_crtc *crtc, *enabled = NULL;
    858 
    859 	for_each_intel_crtc(&dev_priv->drm, crtc) {
    860 		if (intel_crtc_active(crtc)) {
    861 			if (enabled)
    862 				return NULL;
    863 			enabled = crtc;
    864 		}
    865 	}
    866 
    867 	return enabled;
    868 }
    869 
    870 static void pnv_update_wm(struct intel_crtc *unused_crtc)
    871 {
    872 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
    873 	struct intel_crtc *crtc;
    874 	const struct cxsr_latency *latency;
    875 	u32 reg;
    876 	unsigned int wm;
    877 
    878 	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
    879 					 dev_priv->is_ddr3,
    880 					 dev_priv->fsb_freq,
    881 					 dev_priv->mem_freq);
    882 	if (!latency) {
    883 		drm_dbg_kms(&dev_priv->drm,
    884 			    "Unknown FSB/MEM found, disable CxSR\n");
    885 		intel_set_memory_cxsr(dev_priv, false);
    886 		return;
    887 	}
    888 
    889 	crtc = single_enabled_crtc(dev_priv);
    890 	if (crtc) {
    891 		const struct drm_display_mode *adjusted_mode =
    892 			&crtc->config->hw.adjusted_mode;
    893 		const struct drm_framebuffer *fb =
    894 			crtc->base.primary->state->fb;
    895 		int cpp = fb->format->cpp[0];
    896 		int clock = adjusted_mode->crtc_clock;
    897 
    898 		/* Display SR */
    899 		wm = intel_calculate_wm(clock, &pnv_display_wm,
    900 					pnv_display_wm.fifo_size,
    901 					cpp, latency->display_sr);
    902 		reg = I915_READ(DSPFW1);
    903 		reg &= ~DSPFW_SR_MASK;
    904 		reg |= FW_WM(wm, SR);
    905 		I915_WRITE(DSPFW1, reg);
    906 		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
    907 
    908 		/* cursor SR */
    909 		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
    910 					pnv_display_wm.fifo_size,
    911 					4, latency->cursor_sr);
    912 		reg = I915_READ(DSPFW3);
    913 		reg &= ~DSPFW_CURSOR_SR_MASK;
    914 		reg |= FW_WM(wm, CURSOR_SR);
    915 		I915_WRITE(DSPFW3, reg);
    916 
    917 		/* Display HPLL off SR */
    918 		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
    919 					pnv_display_hplloff_wm.fifo_size,
    920 					cpp, latency->display_hpll_disable);
    921 		reg = I915_READ(DSPFW3);
    922 		reg &= ~DSPFW_HPLL_SR_MASK;
    923 		reg |= FW_WM(wm, HPLL_SR);
    924 		I915_WRITE(DSPFW3, reg);
    925 
    926 		/* cursor HPLL off SR */
    927 		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
    928 					pnv_display_hplloff_wm.fifo_size,
    929 					4, latency->cursor_hpll_disable);
    930 		reg = I915_READ(DSPFW3);
    931 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
    932 		reg |= FW_WM(wm, HPLL_CURSOR);
    933 		I915_WRITE(DSPFW3, reg);
    934 		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
    935 
    936 		intel_set_memory_cxsr(dev_priv, true);
    937 	} else {
    938 		intel_set_memory_cxsr(dev_priv, false);
    939 	}
    940 }
    941 
    942 /*
    943  * Documentation says:
    944  * "If the line size is small, the TLB fetches can get in the way of the
    945  *  data fetches, causing some lag in the pixel data return which is not
    946  *  accounted for in the above formulas. The following adjustment only
    947  *  needs to be applied if eight whole lines fit in the buffer at once.
    948  *  The WM is adjusted upwards by the difference between the FIFO size
    949  *  and the size of 8 whole lines. This adjustment is always performed
    950  *  in the actual pixel depth regardless of whether FBC is enabled or not."
    951  */
    952 static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
    953 {
    954 	int tlb_miss = fifo_size * 64 - width * cpp * 8;
    955 
    956 	return max(0, tlb_miss);
    957 }
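
/*
 * Worked example (illustrative numbers): a 511-entry FIFO of 64-byte
 * lines holds 511 * 64 = 32704 bytes. Eight lines of a 1920-wide,
 * 4 bytes-per-pixel plane need 1920 * 4 * 8 = 61440 bytes, so eight
 * lines don't fit and no adjustment applies. For a 960-wide plane,
 * 32704 - 960 * 4 * 8 = 1984 bytes get added to the watermark.
 */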
    958 
    959 static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
    960 				const struct g4x_wm_values *wm)
    961 {
    962 	enum pipe pipe;
    963 
    964 	for_each_pipe(dev_priv, pipe)
    965 		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
    966 
    967 	I915_WRITE(DSPFW1,
    968 		   FW_WM(wm->sr.plane, SR) |
    969 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
    970 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
    971 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
    972 	I915_WRITE(DSPFW2,
    973 		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
    974 		   FW_WM(wm->sr.fbc, FBC_SR) |
    975 		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
    976 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
    977 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
    978 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
    979 	I915_WRITE(DSPFW3,
    980 		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
    981 		   FW_WM(wm->sr.cursor, CURSOR_SR) |
    982 		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
    983 		   FW_WM(wm->hpll.plane, HPLL_SR));
    984 
    985 	POSTING_READ(DSPFW1);
    986 }
    987 
    988 #define FW_WM_VLV(value, plane) \
    989 	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
    990 
    991 static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
    992 				const struct vlv_wm_values *wm)
    993 {
    994 	enum pipe pipe;
    995 
    996 	for_each_pipe(dev_priv, pipe) {
    997 		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
    998 
    999 		I915_WRITE(VLV_DDL(pipe),
   1000 			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
   1001 			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
   1002 			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
   1003 			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
   1004 	}
   1005 
   1006 	/*
   1007 	 * Zero the (unused) WM1 watermarks, and also clear all the
   1008 	 * high order bits so that there are no out of bounds values
   1009 	 * present in the registers during the reprogramming.
   1010 	 */
   1011 	I915_WRITE(DSPHOWM, 0);
   1012 	I915_WRITE(DSPHOWM1, 0);
   1013 	I915_WRITE(DSPFW4, 0);
   1014 	I915_WRITE(DSPFW5, 0);
   1015 	I915_WRITE(DSPFW6, 0);
   1016 
   1017 	I915_WRITE(DSPFW1,
   1018 		   FW_WM(wm->sr.plane, SR) |
   1019 		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
   1020 		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
   1021 		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
   1022 	I915_WRITE(DSPFW2,
   1023 		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
   1024 		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
   1025 		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
   1026 	I915_WRITE(DSPFW3,
   1027 		   FW_WM(wm->sr.cursor, CURSOR_SR));
   1028 
   1029 	if (IS_CHERRYVIEW(dev_priv)) {
   1030 		I915_WRITE(DSPFW7_CHV,
   1031 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
   1032 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
   1033 		I915_WRITE(DSPFW8_CHV,
   1034 			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
   1035 			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
   1036 		I915_WRITE(DSPFW9_CHV,
   1037 			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
   1038 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
   1039 		I915_WRITE(DSPHOWM,
   1040 			   FW_WM(wm->sr.plane >> 9, SR_HI) |
   1041 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
   1042 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
   1043 			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
   1044 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
   1045 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
   1046 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
   1047 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
   1048 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
   1049 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
   1050 	} else {
   1051 		I915_WRITE(DSPFW7,
   1052 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
   1053 			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
   1054 		I915_WRITE(DSPHOWM,
   1055 			   FW_WM(wm->sr.plane >> 9, SR_HI) |
   1056 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
   1057 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
   1058 			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
   1059 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
   1060 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
   1061 			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
   1062 	}
   1063 
   1064 	POSTING_READ(DSPFW1);
   1065 }
   1066 
   1067 #undef FW_WM_VLV
   1068 
   1069 static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
   1070 {
   1071 	/* all latencies in usec */
   1072 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
   1073 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
   1074 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
   1075 
   1076 	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
   1077 }
   1078 
   1079 static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
   1080 {
   1081 	/*
   1082 	 * DSPCNTR[13] supposedly controls whether the
   1083 	 * primary plane can use the FIFO space otherwise
   1084 	 * reserved for the sprite plane. It's not 100% clear
   1085 	 * what the actual FIFO size is, but it looks like we
   1086 	 * can happily set both primary and sprite watermarks
   1087 	 * up to 127 cachelines. So that would seem to mean
   1088 	 * that either DSPCNTR[13] doesn't do anything, or that
   1089 	 * the total FIFO is >= 256 cachelines in size. Either
   1090 	 * way, we don't seem to have to worry about this
   1091 	 * repartitioning as the maximum watermark value the
   1092 	 * register can hold for each plane is lower than the
   1093 	 * minimum FIFO size.
   1094 	 */
   1095 	switch (plane_id) {
   1096 	case PLANE_CURSOR:
   1097 		return 63;
   1098 	case PLANE_PRIMARY:
   1099 		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
   1100 	case PLANE_SPRITE0:
   1101 		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
   1102 	default:
   1103 		MISSING_CASE(plane_id);
   1104 		return 0;
   1105 	}
   1106 }
   1107 
   1108 static int g4x_fbc_fifo_size(int level)
   1109 {
   1110 	switch (level) {
   1111 	case G4X_WM_LEVEL_SR:
   1112 		return 7;
   1113 	case G4X_WM_LEVEL_HPLL:
   1114 		return 15;
   1115 	default:
   1116 		MISSING_CASE(level);
   1117 		return 0;
   1118 	}
   1119 }
   1120 
   1121 static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
   1122 			  const struct intel_plane_state *plane_state,
   1123 			  int level)
   1124 {
   1125 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   1126 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   1127 	const struct drm_display_mode *adjusted_mode =
   1128 		&crtc_state->hw.adjusted_mode;
   1129 	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
   1130 	unsigned int clock, htotal, cpp, width, wm;
   1131 
   1132 	if (latency == 0)
   1133 		return USHRT_MAX;
   1134 
   1135 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   1136 		return 0;
   1137 
   1138 	cpp = plane_state->hw.fb->format->cpp[0];
   1139 
   1140 	/*
   1141 	 * Not 100% sure which way ELK should go here as the
   1142 	 * spec only says CL/CTG should assume 32bpp and BW
   1143 	 * doesn't need to. But as these things followed the
   1144 	 * mobile vs. desktop lines on gen3 as well, let's
   1145 	 * assume ELK doesn't need this.
   1146 	 *
   1147 	 * The spec also fails to list such a restriction for
   1148 	 * the HPLL watermark, which seems a little strange.
   1149 	 * Let's use 32bpp for the HPLL watermark as well.
   1150 	 */
   1151 	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
   1152 	    level != G4X_WM_LEVEL_NORMAL)
   1153 		cpp = max(cpp, 4u);
   1154 
   1155 	clock = adjusted_mode->crtc_clock;
   1156 	htotal = adjusted_mode->crtc_htotal;
   1157 
   1158 	width = drm_rect_width(&plane_state->uapi.dst);
   1159 
   1160 	if (plane->id == PLANE_CURSOR) {
   1161 		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
   1162 	} else if (plane->id == PLANE_PRIMARY &&
   1163 		   level == G4X_WM_LEVEL_NORMAL) {
   1164 		wm = intel_wm_method1(clock, cpp, latency);
   1165 	} else {
   1166 		unsigned int small, large;
   1167 
   1168 		small = intel_wm_method1(clock, cpp, latency);
   1169 		large = intel_wm_method2(clock, htotal, width, cpp, latency);
   1170 
   1171 		wm = min(small, large);
   1172 	}
   1173 
   1174 	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
   1175 			      width, cpp);
   1176 
   1177 	wm = DIV_ROUND_UP(wm, 64) + 2;
   1178 
   1179 	return min_t(unsigned int, wm, USHRT_MAX);
   1180 }
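
/*
 * Worked example (illustrative numbers, non-primary plane): with the
 * method 1 / method 2 results worked above, wm = min(2970, 7680) = 2970
 * bytes; assuming a zero TLB-miss adjustment, the final value is
 * DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines.
 */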
   1181 
   1182 static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
   1183 				 int level, enum plane_id plane_id, u16 value)
   1184 {
   1185 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1186 	bool dirty = false;
   1187 
   1188 	for (; level < intel_wm_num_levels(dev_priv); level++) {
   1189 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
   1190 
   1191 		dirty |= raw->plane[plane_id] != value;
   1192 		raw->plane[plane_id] = value;
   1193 	}
   1194 
   1195 	return dirty;
   1196 }
   1197 
   1198 static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
   1199 			       int level, u16 value)
   1200 {
   1201 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1202 	bool dirty = false;
   1203 
   1204 	/* NORMAL level doesn't have an FBC watermark */
   1205 	level = max(level, G4X_WM_LEVEL_SR);
   1206 
   1207 	for (; level < intel_wm_num_levels(dev_priv); level++) {
   1208 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
   1209 
   1210 		dirty |= raw->fbc != value;
   1211 		raw->fbc = value;
   1212 	}
   1213 
   1214 	return dirty;
   1215 }
   1216 
   1217 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
   1218 			      const struct intel_plane_state *plane_state,
   1219 			      u32 pri_val);
   1220 
   1221 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
   1222 				     const struct intel_plane_state *plane_state)
   1223 {
   1224 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   1225 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1226 	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
   1227 	enum plane_id plane_id = plane->id;
   1228 	bool dirty = false;
   1229 	int level;
   1230 
   1231 	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
   1232 		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
   1233 		if (plane_id == PLANE_PRIMARY)
   1234 			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
   1235 		goto out;
   1236 	}
   1237 
   1238 	for (level = 0; level < num_levels; level++) {
   1239 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
   1240 		int wm, max_wm;
   1241 
   1242 		wm = g4x_compute_wm(crtc_state, plane_state, level);
   1243 		max_wm = g4x_plane_fifo_size(plane_id, level);
   1244 
   1245 		if (wm > max_wm)
   1246 			break;
   1247 
   1248 		dirty |= raw->plane[plane_id] != wm;
   1249 		raw->plane[plane_id] = wm;
   1250 
   1251 		if (plane_id != PLANE_PRIMARY ||
   1252 		    level == G4X_WM_LEVEL_NORMAL)
   1253 			continue;
   1254 
   1255 		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
   1256 					raw->plane[plane_id]);
   1257 		max_wm = g4x_fbc_fifo_size(level);
   1258 
   1259 		/*
   1260 		 * FBC wm is not mandatory as we
   1261 		 * can always just disable its use.
   1262 		 */
   1263 		if (wm > max_wm)
   1264 			wm = USHRT_MAX;
   1265 
   1266 		dirty |= raw->fbc != wm;
   1267 		raw->fbc = wm;
   1268 	}
   1269 
   1270 	/* mark watermarks as invalid */
   1271 	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
   1272 
   1273 	if (plane_id == PLANE_PRIMARY)
   1274 		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
   1275 
   1276  out:
   1277 	if (dirty) {
   1278 		drm_dbg_kms(&dev_priv->drm,
   1279 			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
   1280 			    plane->base.name,
   1281 			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
   1282 			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
   1283 			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
   1284 
   1285 		if (plane_id == PLANE_PRIMARY)
   1286 			drm_dbg_kms(&dev_priv->drm,
   1287 				    "FBC watermarks: SR=%d, HPLL=%d\n",
   1288 				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
   1289 				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
   1290 	}
   1291 
   1292 	return dirty;
   1293 }
   1294 
   1295 static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
   1296 				      enum plane_id plane_id, int level)
   1297 {
   1298 	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
   1299 
   1300 	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
   1301 }
   1302 
   1303 static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
   1304 				     int level)
   1305 {
   1306 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1307 
   1308 	if (level > dev_priv->wm.max_level)
   1309 		return false;
   1310 
   1311 	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
   1312 		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
   1313 		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
   1314 }
   1315 
   1316 /* mark all levels starting from 'level' as invalid */
   1317 static void g4x_invalidate_wms(struct intel_crtc *crtc,
   1318 			       struct g4x_wm_state *wm_state, int level)
   1319 {
   1320 	if (level <= G4X_WM_LEVEL_NORMAL) {
   1321 		enum plane_id plane_id;
   1322 
   1323 		for_each_plane_id_on_crtc(crtc, plane_id)
   1324 			wm_state->wm.plane[plane_id] = USHRT_MAX;
   1325 	}
   1326 
   1327 	if (level <= G4X_WM_LEVEL_SR) {
   1328 		wm_state->cxsr = false;
   1329 		wm_state->sr.cursor = USHRT_MAX;
   1330 		wm_state->sr.plane = USHRT_MAX;
   1331 		wm_state->sr.fbc = USHRT_MAX;
   1332 	}
   1333 
   1334 	if (level <= G4X_WM_LEVEL_HPLL) {
   1335 		wm_state->hpll_en = false;
   1336 		wm_state->hpll.cursor = USHRT_MAX;
   1337 		wm_state->hpll.plane = USHRT_MAX;
   1338 		wm_state->hpll.fbc = USHRT_MAX;
   1339 	}
   1340 }
   1341 
   1342 static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
   1343 {
   1344 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1345 	struct intel_atomic_state *state =
   1346 		to_intel_atomic_state(crtc_state->uapi.state);
   1347 	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
   1348 	int num_active_planes = hweight8(crtc_state->active_planes &
   1349 					 ~BIT(PLANE_CURSOR));
   1350 	const struct g4x_pipe_wm *raw;
   1351 	const struct intel_plane_state *old_plane_state;
   1352 	const struct intel_plane_state *new_plane_state;
   1353 	struct intel_plane *plane;
   1354 	enum plane_id plane_id;
   1355 	int i, level;
   1356 	unsigned int dirty = 0;
   1357 
   1358 	for_each_oldnew_intel_plane_in_state(state, plane,
   1359 					     old_plane_state,
   1360 					     new_plane_state, i) {
   1361 		if (new_plane_state->hw.crtc != &crtc->base &&
   1362 		    old_plane_state->hw.crtc != &crtc->base)
   1363 			continue;
   1364 
   1365 		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
   1366 			dirty |= BIT(plane->id);
   1367 	}
   1368 
   1369 	if (!dirty)
   1370 		return 0;
   1371 
   1372 	level = G4X_WM_LEVEL_NORMAL;
   1373 	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
   1374 		goto out;
   1375 
   1376 	raw = &crtc_state->wm.g4x.raw[level];
   1377 	for_each_plane_id_on_crtc(crtc, plane_id)
   1378 		wm_state->wm.plane[plane_id] = raw->plane[plane_id];
   1379 
   1380 	level = G4X_WM_LEVEL_SR;
   1381 
   1382 	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
   1383 		goto out;
   1384 
   1385 	raw = &crtc_state->wm.g4x.raw[level];
   1386 	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
   1387 	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
   1388 	wm_state->sr.fbc = raw->fbc;
   1389 
   1390 	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);
   1391 
   1392 	level = G4X_WM_LEVEL_HPLL;
   1393 
   1394 	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
   1395 		goto out;
   1396 
   1397 	raw = &crtc_state->wm.g4x.raw[level];
   1398 	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
   1399 	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
   1400 	wm_state->hpll.fbc = raw->fbc;
   1401 
   1402 	wm_state->hpll_en = wm_state->cxsr;
   1403 
   1404 	level++;
   1405 
   1406  out:
   1407 	if (level == G4X_WM_LEVEL_NORMAL)
   1408 		return -EINVAL;
   1409 
   1410 	/* invalidate the higher levels */
   1411 	g4x_invalidate_wms(crtc, wm_state, level);
   1412 
   1413 	/*
   1414 	 * Determine if the FBC watermark(s) can be used. If
   1415 	 * this isn't the case we prefer to disable the FBC
   1416 	 * watermark(s) rather than disable the SR/HPLL
   1417 	 * level(s) entirely.
   1418 	 */
   1419 	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;
   1420 
   1421 	if (level >= G4X_WM_LEVEL_SR &&
   1422 	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
   1423 		wm_state->fbc_en = false;
   1424 	else if (level >= G4X_WM_LEVEL_HPLL &&
   1425 		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
   1426 		wm_state->fbc_en = false;
   1427 
   1428 	return 0;
   1429 }
   1430 
   1431 static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
   1432 {
   1433 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   1434 	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
   1435 	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
   1436 	struct intel_atomic_state *intel_state =
   1437 		to_intel_atomic_state(new_crtc_state->uapi.state);
   1438 	const struct intel_crtc_state *old_crtc_state =
   1439 		intel_atomic_get_old_crtc_state(intel_state, crtc);
   1440 	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
   1441 	enum plane_id plane_id;
   1442 
   1443 	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
   1444 		*intermediate = *optimal;
   1445 
   1446 		intermediate->cxsr = false;
   1447 		intermediate->hpll_en = false;
   1448 		goto out;
   1449 	}
   1450 
   1451 	intermediate->cxsr = optimal->cxsr && active->cxsr &&
   1452 		!new_crtc_state->disable_cxsr;
   1453 	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
   1454 		!new_crtc_state->disable_cxsr;
   1455 	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
   1456 
   1457 	for_each_plane_id_on_crtc(crtc, plane_id) {
   1458 		intermediate->wm.plane[plane_id] =
   1459 			max(optimal->wm.plane[plane_id],
   1460 			    active->wm.plane[plane_id]);
   1461 
   1462 		WARN_ON(intermediate->wm.plane[plane_id] >
   1463 			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
   1464 	}
   1465 
   1466 	intermediate->sr.plane = max(optimal->sr.plane,
   1467 				     active->sr.plane);
   1468 	intermediate->sr.cursor = max(optimal->sr.cursor,
   1469 				      active->sr.cursor);
   1470 	intermediate->sr.fbc = max(optimal->sr.fbc,
   1471 				   active->sr.fbc);
   1472 
   1473 	intermediate->hpll.plane = max(optimal->hpll.plane,
   1474 				       active->hpll.plane);
   1475 	intermediate->hpll.cursor = max(optimal->hpll.cursor,
   1476 					active->hpll.cursor);
   1477 	intermediate->hpll.fbc = max(optimal->hpll.fbc,
   1478 				     active->hpll.fbc);
   1479 
   1480 	WARN_ON((intermediate->sr.plane >
   1481 		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
   1482 		 intermediate->sr.cursor >
   1483 		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
   1484 		intermediate->cxsr);
   1485 	WARN_ON((intermediate->sr.plane >
   1486 		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
   1487 		 intermediate->sr.cursor >
   1488 		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
   1489 		intermediate->hpll_en);
   1490 
   1491 	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
   1492 		intermediate->fbc_en && intermediate->cxsr);
   1493 	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
   1494 		intermediate->fbc_en && intermediate->hpll_en);
   1495 
   1496 out:
   1497 	/*
   1498 	 * If our intermediate WM are identical to the final WM, then we can
   1499 	 * omit the post-vblank programming; only update if it's different.
   1500 	 */
   1501 	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
   1502 		new_crtc_state->wm.need_postvbl_update = true;
   1503 
   1504 	return 0;
   1505 }
   1506 
   1507 static void g4x_merge_wm(struct drm_i915_private *dev_priv,
   1508 			 struct g4x_wm_values *wm)
   1509 {
   1510 	struct intel_crtc *crtc;
   1511 	int num_active_pipes = 0;
   1512 
   1513 	wm->cxsr = true;
   1514 	wm->hpll_en = true;
   1515 	wm->fbc_en = true;
   1516 
   1517 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   1518 		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
   1519 
   1520 		if (!crtc->active)
   1521 			continue;
   1522 
   1523 		if (!wm_state->cxsr)
   1524 			wm->cxsr = false;
   1525 		if (!wm_state->hpll_en)
   1526 			wm->hpll_en = false;
   1527 		if (!wm_state->fbc_en)
   1528 			wm->fbc_en = false;
   1529 
   1530 		num_active_pipes++;
   1531 	}
   1532 
   1533 	if (num_active_pipes != 1) {
   1534 		wm->cxsr = false;
   1535 		wm->hpll_en = false;
   1536 		wm->fbc_en = false;
   1537 	}
   1538 
   1539 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   1540 		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
   1541 		enum pipe pipe = crtc->pipe;
   1542 
   1543 		wm->pipe[pipe] = wm_state->wm;
   1544 		if (crtc->active && wm->cxsr)
   1545 			wm->sr = wm_state->sr;
   1546 		if (crtc->active && wm->hpll_en)
   1547 			wm->hpll = wm_state->hpll;
   1548 	}
   1549 }
   1550 
   1551 static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
   1552 {
   1553 	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
   1554 	struct g4x_wm_values new_wm = {};
   1555 
   1556 	g4x_merge_wm(dev_priv, &new_wm);
   1557 
   1558 	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
   1559 		return;
   1560 
   1561 	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
   1562 		_intel_set_memory_cxsr(dev_priv, false);
   1563 
   1564 	g4x_write_wm_values(dev_priv, &new_wm);
   1565 
   1566 	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
   1567 		_intel_set_memory_cxsr(dev_priv, true);
   1568 
   1569 	*old_wm = new_wm;
   1570 }
   1571 
   1572 static void g4x_initial_watermarks(struct intel_atomic_state *state,
   1573 				   struct intel_crtc *crtc)
   1574 {
   1575 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1576 	const struct intel_crtc_state *crtc_state =
   1577 		intel_atomic_get_new_crtc_state(state, crtc);
   1578 
   1579 	mutex_lock(&dev_priv->wm.wm_mutex);
   1580 	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
   1581 	g4x_program_watermarks(dev_priv);
   1582 	mutex_unlock(&dev_priv->wm.wm_mutex);
   1583 }
   1584 
   1585 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
   1586 				    struct intel_crtc *crtc)
   1587 {
   1588 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1589 	const struct intel_crtc_state *crtc_state =
   1590 		intel_atomic_get_new_crtc_state(state, crtc);
   1591 
   1592 	if (!crtc_state->wm.need_postvbl_update)
   1593 		return;
   1594 
   1595 	mutex_lock(&dev_priv->wm.wm_mutex);
   1596 	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
   1597 	g4x_program_watermarks(dev_priv);
   1598 	mutex_unlock(&dev_priv->wm.wm_mutex);
   1599 }
   1600 
   1601 /* latency must be in 0.1us units. */
   1602 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
   1603 				   unsigned int htotal,
   1604 				   unsigned int width,
   1605 				   unsigned int cpp,
   1606 				   unsigned int latency)
   1607 {
   1608 	unsigned int ret;
   1609 
   1610 	ret = intel_wm_method2(pixel_rate, htotal,
   1611 			       width, cpp, latency);
   1612 	ret = DIV_ROUND_UP(ret, 64);
   1613 
   1614 	return ret;
   1615 }
   1616 
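         /*
          * Illustrative example (assuming intel_wm_method2 rounds the
          * latency window up to whole lines of fetched data; not from the
          * original source): with a 148.5 MHz pixel clock, htotal 2200,
          * width 1920 and cpp 4, a 3.0 us latency (latency = 30) covers
          * less than one line, so method2 returns 1920 * 4 = 7680 bytes,
          * which the DIV_ROUND_UP above converts to 120 64-byte cachelines.
          */
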
   1617 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
   1618 {
   1619 	/* all latencies in usec */
   1620 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
   1621 
   1622 	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
   1623 
   1624 	if (IS_CHERRYVIEW(dev_priv)) {
   1625 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
   1626 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
   1627 
   1628 		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
   1629 	}
   1630 }
   1631 
   1632 static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
   1633 				const struct intel_plane_state *plane_state,
   1634 				int level)
   1635 {
   1636 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   1637 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   1638 	const struct drm_display_mode *adjusted_mode =
   1639 		&crtc_state->hw.adjusted_mode;
   1640 	unsigned int clock, htotal, cpp, width, wm;
   1641 
   1642 	if (dev_priv->wm.pri_latency[level] == 0)
   1643 		return USHRT_MAX;
   1644 
   1645 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   1646 		return 0;
   1647 
   1648 	cpp = plane_state->hw.fb->format->cpp[0];
   1649 	clock = adjusted_mode->crtc_clock;
   1650 	htotal = adjusted_mode->crtc_htotal;
   1651 	width = crtc_state->pipe_src_w;
   1652 
   1653 	if (plane->id == PLANE_CURSOR) {
   1654 		/*
   1655 		 * FIXME the formula gives values that are
   1656 		 * too big for the cursor FIFO, and hence we
   1657 		 * would never be able to use cursors. For
   1658 		 * now just hardcode the watermark.
   1659 		 */
   1660 		wm = 63;
   1661 	} else {
   1662 		wm = vlv_wm_method2(clock, htotal, width, cpp,
   1663 				    dev_priv->wm.pri_latency[level] * 10);
   1664 	}
   1665 
   1666 	return min_t(unsigned int, wm, USHRT_MAX);
   1667 }
   1668 
   1669 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
   1670 {
   1671 	return (active_planes & (BIT(PLANE_SPRITE0) |
   1672 				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
   1673 }
   1674 
   1675 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
   1676 {
   1677 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1678 	const struct g4x_pipe_wm *raw =
   1679 		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
   1680 	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
   1681 	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
   1682 	int num_active_planes = hweight8(active_planes);
   1683 	const int fifo_size = 511;
   1684 	int fifo_extra, fifo_left = fifo_size;
   1685 	int sprite0_fifo_extra = 0;
   1686 	unsigned int total_rate;
   1687 	enum plane_id plane_id;
   1688 
   1689 	/*
   1690 	 * When enabling sprite0 after sprite1 has already been enabled
   1691 	 * we tend to get an underrun unless sprite0 already has some
    1692 	 * FIFO space allocated. Hence we always allocate at least one
   1693 	 * cacheline for sprite0 whenever sprite1 is enabled.
   1694 	 *
   1695 	 * All other plane enable sequences appear immune to this problem.
   1696 	 */
   1697 	if (vlv_need_sprite0_fifo_workaround(active_planes))
   1698 		sprite0_fifo_extra = 1;
   1699 
   1700 	total_rate = raw->plane[PLANE_PRIMARY] +
   1701 		raw->plane[PLANE_SPRITE0] +
   1702 		raw->plane[PLANE_SPRITE1] +
   1703 		sprite0_fifo_extra;
   1704 
   1705 	if (total_rate > fifo_size)
   1706 		return -EINVAL;
   1707 
   1708 	if (total_rate == 0)
   1709 		total_rate = 1;
   1710 
   1711 	for_each_plane_id_on_crtc(crtc, plane_id) {
   1712 		unsigned int rate;
   1713 
   1714 		if ((active_planes & BIT(plane_id)) == 0) {
   1715 			fifo_state->plane[plane_id] = 0;
   1716 			continue;
   1717 		}
   1718 
   1719 		rate = raw->plane[plane_id];
   1720 		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
   1721 		fifo_left -= fifo_state->plane[plane_id];
   1722 	}
   1723 
   1724 	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
   1725 	fifo_left -= sprite0_fifo_extra;
   1726 
   1727 	fifo_state->plane[PLANE_CURSOR] = 63;
   1728 
   1729 	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
   1730 
   1731 	/* spread the remainder evenly */
   1732 	for_each_plane_id_on_crtc(crtc, plane_id) {
   1733 		int plane_extra;
   1734 
   1735 		if (fifo_left == 0)
   1736 			break;
   1737 
   1738 		if ((active_planes & BIT(plane_id)) == 0)
   1739 			continue;
   1740 
   1741 		plane_extra = min(fifo_extra, fifo_left);
   1742 		fifo_state->plane[plane_id] += plane_extra;
   1743 		fifo_left -= plane_extra;
   1744 	}
   1745 
   1746 	WARN_ON(active_planes != 0 && fifo_left != 0);
   1747 
   1748 	/* give it all to the first plane if none are active */
   1749 	if (active_planes == 0) {
   1750 		WARN_ON(fifo_left != fifo_size);
   1751 		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
   1752 	}
   1753 
   1754 	return 0;
   1755 }
   1756 
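         /*
          * Illustrative split (not from the original source): with only the
          * primary and sprite0 active at rates 100 and 50, total_rate is
          * 150, so the primary gets 511 * 100 / 150 = 340 entries, sprite0
          * gets 511 * 50 / 150 = 170, and the remainder loop hands the one
          * leftover entry to the primary: 341 + 170 = 511. The cursor keeps
          * its fixed 63 entries on top of this.
          */
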
   1757 /* mark all levels starting from 'level' as invalid */
   1758 static void vlv_invalidate_wms(struct intel_crtc *crtc,
   1759 			       struct vlv_wm_state *wm_state, int level)
   1760 {
   1761 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1762 
   1763 	for (; level < intel_wm_num_levels(dev_priv); level++) {
   1764 		enum plane_id plane_id;
   1765 
   1766 		for_each_plane_id_on_crtc(crtc, plane_id)
   1767 			wm_state->wm[level].plane[plane_id] = USHRT_MAX;
   1768 
   1769 		wm_state->sr[level].cursor = USHRT_MAX;
   1770 		wm_state->sr[level].plane = USHRT_MAX;
   1771 	}
   1772 }
   1773 
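         /*
          * The vlv watermark registers count remaining FIFO space rather
          * than entries used, hence the inversion; a value exceeding the
          * FIFO size cannot be represented and marks the level as unusable.
          */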
   1774 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
   1775 {
   1776 	if (wm > fifo_size)
   1777 		return USHRT_MAX;
   1778 	else
   1779 		return fifo_size - wm;
   1780 }
   1781 
   1782 /*
    1783  * Starting from 'level', set that level and all higher
    1784  * levels to 'value' in the "raw" watermarks.
   1785  */
   1786 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
   1787 				 int level, enum plane_id plane_id, u16 value)
   1788 {
   1789 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1790 	int num_levels = intel_wm_num_levels(dev_priv);
   1791 	bool dirty = false;
   1792 
   1793 	for (; level < num_levels; level++) {
   1794 		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
   1795 
   1796 		dirty |= raw->plane[plane_id] != value;
   1797 		raw->plane[plane_id] = value;
   1798 	}
   1799 
   1800 	return dirty;
   1801 }
   1802 
   1803 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
   1804 				     const struct intel_plane_state *plane_state)
   1805 {
   1806 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   1807 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1808 	enum plane_id plane_id = plane->id;
   1809 	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
   1810 	int level;
   1811 	bool dirty = false;
   1812 
   1813 	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
   1814 		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
   1815 		goto out;
   1816 	}
   1817 
   1818 	for (level = 0; level < num_levels; level++) {
   1819 		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
   1820 		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
   1821 		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
   1822 
   1823 		if (wm > max_wm)
   1824 			break;
   1825 
   1826 		dirty |= raw->plane[plane_id] != wm;
   1827 		raw->plane[plane_id] = wm;
   1828 	}
   1829 
   1830 	/* mark all higher levels as invalid */
   1831 	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
   1832 
   1833 out:
   1834 	if (dirty)
   1835 		drm_dbg_kms(&dev_priv->drm,
   1836 			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
   1837 			    plane->base.name,
   1838 			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
   1839 			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
   1840 			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
   1841 
   1842 	return dirty;
   1843 }
   1844 
   1845 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
   1846 				      enum plane_id plane_id, int level)
   1847 {
   1848 	const struct g4x_pipe_wm *raw =
   1849 		&crtc_state->wm.vlv.raw[level];
   1850 	const struct vlv_fifo_state *fifo_state =
   1851 		&crtc_state->wm.vlv.fifo_state;
   1852 
   1853 	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
   1854 }
   1855 
   1856 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
   1857 {
   1858 	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
   1859 		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
   1860 		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
   1861 		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
   1862 }
   1863 
   1864 static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
   1865 {
   1866 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1867 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1868 	struct intel_atomic_state *state =
   1869 		to_intel_atomic_state(crtc_state->uapi.state);
   1870 	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
   1871 	const struct vlv_fifo_state *fifo_state =
   1872 		&crtc_state->wm.vlv.fifo_state;
   1873 	int num_active_planes = hweight8(crtc_state->active_planes &
   1874 					 ~BIT(PLANE_CURSOR));
   1875 	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
   1876 	const struct intel_plane_state *old_plane_state;
   1877 	const struct intel_plane_state *new_plane_state;
   1878 	struct intel_plane *plane;
   1879 	enum plane_id plane_id;
   1880 	int level, ret, i;
   1881 	unsigned int dirty = 0;
   1882 
   1883 	for_each_oldnew_intel_plane_in_state(state, plane,
   1884 					     old_plane_state,
   1885 					     new_plane_state, i) {
   1886 		if (new_plane_state->hw.crtc != &crtc->base &&
   1887 		    old_plane_state->hw.crtc != &crtc->base)
   1888 			continue;
   1889 
   1890 		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
   1891 			dirty |= BIT(plane->id);
   1892 	}
   1893 
   1894 	/*
   1895 	 * DSPARB registers may have been reset due to the
   1896 	 * power well being turned off. Make sure we restore
   1897 	 * them to a consistent state even if no primary/sprite
   1898 	 * planes are initially active.
   1899 	 */
   1900 	if (needs_modeset)
   1901 		crtc_state->fifo_changed = true;
   1902 
   1903 	if (!dirty)
   1904 		return 0;
   1905 
   1906 	/* cursor changes don't warrant a FIFO recompute */
   1907 	if (dirty & ~BIT(PLANE_CURSOR)) {
   1908 		const struct intel_crtc_state *old_crtc_state =
   1909 			intel_atomic_get_old_crtc_state(state, crtc);
   1910 		const struct vlv_fifo_state *old_fifo_state =
   1911 			&old_crtc_state->wm.vlv.fifo_state;
   1912 
   1913 		ret = vlv_compute_fifo(crtc_state);
   1914 		if (ret)
   1915 			return ret;
   1916 
   1917 		if (needs_modeset ||
   1918 		    memcmp(old_fifo_state, fifo_state,
   1919 			   sizeof(*fifo_state)) != 0)
   1920 			crtc_state->fifo_changed = true;
   1921 	}
   1922 
   1923 	/* initially allow all levels */
   1924 	wm_state->num_levels = intel_wm_num_levels(dev_priv);
   1925 	/*
   1926 	 * Note that enabling cxsr with no primary/sprite planes
   1927 	 * enabled can wedge the pipe. Hence we only allow cxsr
   1928 	 * with exactly one enabled primary/sprite plane.
   1929 	 */
   1930 	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
   1931 
   1932 	for (level = 0; level < wm_state->num_levels; level++) {
   1933 		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
   1934 		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
   1935 
   1936 		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
   1937 			break;
   1938 
   1939 		for_each_plane_id_on_crtc(crtc, plane_id) {
   1940 			wm_state->wm[level].plane[plane_id] =
   1941 				vlv_invert_wm_value(raw->plane[plane_id],
   1942 						    fifo_state->plane[plane_id]);
   1943 		}
   1944 
   1945 		wm_state->sr[level].plane =
   1946 			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
   1947 						 raw->plane[PLANE_SPRITE0],
   1948 						 raw->plane[PLANE_SPRITE1]),
   1949 					    sr_fifo_size);
   1950 
   1951 		wm_state->sr[level].cursor =
   1952 			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
   1953 					    63);
   1954 	}
   1955 
   1956 	if (level == 0)
   1957 		return -EINVAL;
   1958 
   1959 	/* limit to only levels we can actually handle */
   1960 	wm_state->num_levels = level;
   1961 
   1962 	/* invalidate the higher levels */
   1963 	vlv_invalidate_wms(crtc, wm_state, level);
   1964 
   1965 	return 0;
   1966 }
   1967 
   1968 #define VLV_FIFO(plane, value) \
   1969 	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
   1970 
   1971 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
   1972 				   struct intel_crtc *crtc)
   1973 {
   1974 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1975 	struct intel_uncore *uncore = &dev_priv->uncore;
   1976 	const struct intel_crtc_state *crtc_state =
   1977 		intel_atomic_get_new_crtc_state(state, crtc);
   1978 	const struct vlv_fifo_state *fifo_state =
   1979 		&crtc_state->wm.vlv.fifo_state;
   1980 	int sprite0_start, sprite1_start, fifo_size;
   1981 
   1982 	if (!crtc_state->fifo_changed)
   1983 		return;
   1984 
   1985 	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
   1986 	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
   1987 	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
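         	/*
         	 * The split points are cumulative: the primary plane occupies
         	 * [0, sprite0_start), sprite0 [sprite0_start, sprite1_start)
         	 * and sprite1 [sprite1_start, 511).
         	 */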
   1988 
   1989 	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
   1990 	WARN_ON(fifo_size != 511);
   1991 
   1992 	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
   1993 
   1994 	/*
   1995 	 * uncore.lock serves a double purpose here. It allows us to
   1996 	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
   1997 	 * it protects the DSPARB registers from getting clobbered by
   1998 	 * parallel updates from multiple pipes.
   1999 	 *
   2000 	 * intel_pipe_update_start() has already disabled interrupts
   2001 	 * for us, so a plain spin_lock() is sufficient here.
   2002 	 */
   2003 	spin_lock(&uncore->lock);
   2004 
   2005 	switch (crtc->pipe) {
   2006 		u32 dsparb, dsparb2, dsparb3;
   2007 	case PIPE_A:
   2008 		dsparb = intel_uncore_read_fw(uncore, DSPARB);
   2009 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
   2010 
   2011 		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
   2012 			    VLV_FIFO(SPRITEB, 0xff));
   2013 		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
   2014 			   VLV_FIFO(SPRITEB, sprite1_start));
   2015 
   2016 		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
   2017 			     VLV_FIFO(SPRITEB_HI, 0x1));
   2018 		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
   2019 			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
   2020 
   2021 		intel_uncore_write_fw(uncore, DSPARB, dsparb);
   2022 		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
   2023 		break;
   2024 	case PIPE_B:
   2025 		dsparb = intel_uncore_read_fw(uncore, DSPARB);
   2026 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
   2027 
   2028 		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
   2029 			    VLV_FIFO(SPRITED, 0xff));
   2030 		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
   2031 			   VLV_FIFO(SPRITED, sprite1_start));
   2032 
   2033 		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
   2034 			     VLV_FIFO(SPRITED_HI, 0xff));
   2035 		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
   2036 			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
   2037 
   2038 		intel_uncore_write_fw(uncore, DSPARB, dsparb);
   2039 		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
   2040 		break;
   2041 	case PIPE_C:
   2042 		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
   2043 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
   2044 
   2045 		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
   2046 			     VLV_FIFO(SPRITEF, 0xff));
   2047 		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
   2048 			    VLV_FIFO(SPRITEF, sprite1_start));
   2049 
   2050 		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
   2051 			     VLV_FIFO(SPRITEF_HI, 0xff));
   2052 		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
   2053 			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
   2054 
   2055 		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
   2056 		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
   2057 		break;
   2058 	default:
   2059 		break;
   2060 	}
   2061 
   2062 	intel_uncore_posting_read_fw(uncore, DSPARB);
   2063 
   2064 	spin_unlock(&uncore->lock);
   2065 }
   2066 
   2067 #undef VLV_FIFO
   2068 
   2069 static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
   2070 {
   2071 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   2072 	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
   2073 	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
   2074 	struct intel_atomic_state *intel_state =
   2075 		to_intel_atomic_state(new_crtc_state->uapi.state);
   2076 	const struct intel_crtc_state *old_crtc_state =
   2077 		intel_atomic_get_old_crtc_state(intel_state, crtc);
   2078 	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
   2079 	int level;
   2080 
   2081 	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
   2082 		*intermediate = *optimal;
   2083 
   2084 		intermediate->cxsr = false;
   2085 		goto out;
   2086 	}
   2087 
   2088 	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
   2089 	intermediate->cxsr = optimal->cxsr && active->cxsr &&
   2090 		!new_crtc_state->disable_cxsr;
   2091 
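         	/*
         	 * The vlv wm values are stored post-inversion (remaining FIFO
         	 * space), so min() picks the more conservative of the old and
         	 * new values, i.e. the one with the larger raw usage.
         	 */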
   2092 	for (level = 0; level < intermediate->num_levels; level++) {
   2093 		enum plane_id plane_id;
   2094 
   2095 		for_each_plane_id_on_crtc(crtc, plane_id) {
   2096 			intermediate->wm[level].plane[plane_id] =
   2097 				min(optimal->wm[level].plane[plane_id],
   2098 				    active->wm[level].plane[plane_id]);
   2099 		}
   2100 
   2101 		intermediate->sr[level].plane = min(optimal->sr[level].plane,
   2102 						    active->sr[level].plane);
   2103 		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
   2104 						     active->sr[level].cursor);
   2105 	}
   2106 
   2107 	vlv_invalidate_wms(crtc, intermediate, level);
   2108 
   2109 out:
   2110 	/*
   2111 	 * If our intermediate WM are identical to the final WM, then we can
   2112 	 * omit the post-vblank programming; only update if it's different.
   2113 	 */
   2114 	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
   2115 		new_crtc_state->wm.need_postvbl_update = true;
   2116 
   2117 	return 0;
   2118 }
   2119 
   2120 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
   2121 			 struct vlv_wm_values *wm)
   2122 {
   2123 	struct intel_crtc *crtc;
   2124 	int num_active_pipes = 0;
   2125 
   2126 	wm->level = dev_priv->wm.max_level;
   2127 	wm->cxsr = true;
   2128 
   2129 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   2130 		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
   2131 
   2132 		if (!crtc->active)
   2133 			continue;
   2134 
   2135 		if (!wm_state->cxsr)
   2136 			wm->cxsr = false;
   2137 
   2138 		num_active_pipes++;
   2139 		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
   2140 	}
   2141 
   2142 	if (num_active_pipes != 1)
   2143 		wm->cxsr = false;
   2144 
   2145 	if (num_active_pipes > 1)
   2146 		wm->level = VLV_WM_LEVEL_PM2;
   2147 
   2148 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   2149 		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
   2150 		enum pipe pipe = crtc->pipe;
   2151 
   2152 		wm->pipe[pipe] = wm_state->wm[wm->level];
   2153 		if (crtc->active && wm->cxsr)
   2154 			wm->sr = wm_state->sr[wm->level];
   2155 
   2156 		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
   2157 		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
   2158 		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
   2159 		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
   2160 	}
   2161 }
   2162 
   2163 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
   2164 {
   2165 	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
   2166 	struct vlv_wm_values new_wm = {};
   2167 
   2168 	vlv_merge_wm(dev_priv, &new_wm);
   2169 
   2170 	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
   2171 		return;
   2172 
   2173 	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
   2174 		chv_set_memory_dvfs(dev_priv, false);
   2175 
   2176 	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
   2177 		chv_set_memory_pm5(dev_priv, false);
   2178 
   2179 	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
   2180 		_intel_set_memory_cxsr(dev_priv, false);
   2181 
   2182 	vlv_write_wm_values(dev_priv, &new_wm);
   2183 
   2184 	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
   2185 		_intel_set_memory_cxsr(dev_priv, true);
   2186 
   2187 	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
   2188 		chv_set_memory_pm5(dev_priv, true);
   2189 
   2190 	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
   2191 		chv_set_memory_dvfs(dev_priv, true);
   2192 
   2193 	*old_wm = new_wm;
   2194 }
   2195 
   2196 static void vlv_initial_watermarks(struct intel_atomic_state *state,
   2197 				   struct intel_crtc *crtc)
   2198 {
   2199 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   2200 	const struct intel_crtc_state *crtc_state =
   2201 		intel_atomic_get_new_crtc_state(state, crtc);
   2202 
   2203 	mutex_lock(&dev_priv->wm.wm_mutex);
   2204 	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
   2205 	vlv_program_watermarks(dev_priv);
   2206 	mutex_unlock(&dev_priv->wm.wm_mutex);
   2207 }
   2208 
   2209 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
   2210 				    struct intel_crtc *crtc)
   2211 {
   2212 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   2213 	const struct intel_crtc_state *crtc_state =
   2214 		intel_atomic_get_new_crtc_state(state, crtc);
   2215 
   2216 	if (!crtc_state->wm.need_postvbl_update)
   2217 		return;
   2218 
   2219 	mutex_lock(&dev_priv->wm.wm_mutex);
   2220 	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
   2221 	vlv_program_watermarks(dev_priv);
   2222 	mutex_unlock(&dev_priv->wm.wm_mutex);
   2223 }
   2224 
   2225 static void i965_update_wm(struct intel_crtc *unused_crtc)
   2226 {
   2227 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
   2228 	struct intel_crtc *crtc;
   2229 	int srwm = 1;
   2230 	int cursor_sr = 16;
   2231 	bool cxsr_enabled;
   2232 
    2233 	/* Calculate self-refresh entries for single plane configs */
   2234 	crtc = single_enabled_crtc(dev_priv);
   2235 	if (crtc) {
   2236 		/* self-refresh has much higher latency */
   2237 		static const int sr_latency_ns = 12000;
   2238 		const struct drm_display_mode *adjusted_mode =
   2239 			&crtc->config->hw.adjusted_mode;
   2240 		const struct drm_framebuffer *fb =
   2241 			crtc->base.primary->state->fb;
   2242 		int clock = adjusted_mode->crtc_clock;
   2243 		int htotal = adjusted_mode->crtc_htotal;
   2244 		int hdisplay = crtc->config->pipe_src_w;
   2245 		int cpp = fb->format->cpp[0];
   2246 		int entries;
   2247 
   2248 		entries = intel_wm_method2(clock, htotal,
   2249 					   hdisplay, cpp, sr_latency_ns / 100);
   2250 		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
   2251 		srwm = I965_FIFO_SIZE - entries;
   2252 		if (srwm < 0)
   2253 			srwm = 1;
   2254 		srwm &= 0x1ff;
   2255 		drm_dbg_kms(&dev_priv->drm,
   2256 			    "self-refresh entries: %d, wm: %d\n",
   2257 			    entries, srwm);
   2258 
   2259 		entries = intel_wm_method2(clock, htotal,
   2260 					   crtc->base.cursor->state->crtc_w, 4,
   2261 					   sr_latency_ns / 100);
   2262 		entries = DIV_ROUND_UP(entries,
   2263 				       i965_cursor_wm_info.cacheline_size) +
   2264 			i965_cursor_wm_info.guard_size;
   2265 
   2266 		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
   2267 		if (cursor_sr > i965_cursor_wm_info.max_wm)
   2268 			cursor_sr = i965_cursor_wm_info.max_wm;
   2269 
   2270 		drm_dbg_kms(&dev_priv->drm,
   2271 			    "self-refresh watermark: display plane %d "
   2272 			    "cursor %d\n", srwm, cursor_sr);
   2273 
   2274 		cxsr_enabled = true;
   2275 	} else {
   2276 		cxsr_enabled = false;
   2277 		/* Turn off self refresh if both pipes are enabled */
   2278 		intel_set_memory_cxsr(dev_priv, false);
   2279 	}
   2280 
   2281 	drm_dbg_kms(&dev_priv->drm,
   2282 		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
   2283 		    srwm);
   2284 
   2285 	/* 965 has limitations... */
   2286 	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
   2287 		   FW_WM(8, CURSORB) |
   2288 		   FW_WM(8, PLANEB) |
   2289 		   FW_WM(8, PLANEA));
   2290 	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
   2291 		   FW_WM(8, PLANEC_OLD));
   2292 	/* update cursor SR watermark */
   2293 	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
   2294 
   2295 	if (cxsr_enabled)
   2296 		intel_set_memory_cxsr(dev_priv, true);
   2297 }
   2298 
   2299 #undef FW_WM
   2300 
   2301 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
   2302 {
   2303 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
   2304 	const struct intel_watermark_params *wm_info;
   2305 	u32 fwater_lo;
   2306 	u32 fwater_hi;
   2307 	int cwm, srwm = 1;
   2308 	int fifo_size;
   2309 	int planea_wm, planeb_wm;
   2310 	struct intel_crtc *crtc, *enabled = NULL;
   2311 
   2312 	if (IS_I945GM(dev_priv))
   2313 		wm_info = &i945_wm_info;
   2314 	else if (!IS_GEN(dev_priv, 2))
   2315 		wm_info = &i915_wm_info;
   2316 	else
   2317 		wm_info = &i830_a_wm_info;
   2318 
   2319 	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
   2320 	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
   2321 	if (intel_crtc_active(crtc)) {
   2322 		const struct drm_display_mode *adjusted_mode =
   2323 			&crtc->config->hw.adjusted_mode;
   2324 		const struct drm_framebuffer *fb =
   2325 			crtc->base.primary->state->fb;
   2326 		int cpp;
   2327 
   2328 		if (IS_GEN(dev_priv, 2))
   2329 			cpp = 4;
   2330 		else
   2331 			cpp = fb->format->cpp[0];
   2332 
   2333 		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
   2334 					       wm_info, fifo_size, cpp,
   2335 					       pessimal_latency_ns);
   2336 		enabled = crtc;
   2337 	} else {
   2338 		planea_wm = fifo_size - wm_info->guard_size;
   2339 		if (planea_wm > (long)wm_info->max_wm)
   2340 			planea_wm = wm_info->max_wm;
   2341 	}
   2342 
   2343 	if (IS_GEN(dev_priv, 2))
   2344 		wm_info = &i830_bc_wm_info;
   2345 
   2346 	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
   2347 	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
   2348 	if (intel_crtc_active(crtc)) {
   2349 		const struct drm_display_mode *adjusted_mode =
   2350 			&crtc->config->hw.adjusted_mode;
   2351 		const struct drm_framebuffer *fb =
   2352 			crtc->base.primary->state->fb;
   2353 		int cpp;
   2354 
   2355 		if (IS_GEN(dev_priv, 2))
   2356 			cpp = 4;
   2357 		else
   2358 			cpp = fb->format->cpp[0];
   2359 
   2360 		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
   2361 					       wm_info, fifo_size, cpp,
   2362 					       pessimal_latency_ns);
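         		/*
         		 * Self-refresh wants exactly one active crtc; with both
         		 * pipes active there is no single "enabled" crtc.
         		 */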
   2363 		if (enabled == NULL)
   2364 			enabled = crtc;
   2365 		else
   2366 			enabled = NULL;
   2367 	} else {
   2368 		planeb_wm = fifo_size - wm_info->guard_size;
   2369 		if (planeb_wm > (long)wm_info->max_wm)
   2370 			planeb_wm = wm_info->max_wm;
   2371 	}
   2372 
   2373 	drm_dbg_kms(&dev_priv->drm,
   2374 		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
   2375 
   2376 	if (IS_I915GM(dev_priv) && enabled) {
   2377 		struct drm_i915_gem_object *obj;
   2378 
   2379 		obj = intel_fb_obj(enabled->base.primary->state->fb);
   2380 
   2381 		/* self-refresh seems busted with untiled */
   2382 		if (!i915_gem_object_is_tiled(obj))
   2383 			enabled = NULL;
   2384 	}
   2385 
   2386 	/*
   2387 	 * Overlay gets an aggressive default since video jitter is bad.
   2388 	 */
   2389 	cwm = 2;
   2390 
   2391 	/* Play safe and disable self-refresh before adjusting watermarks. */
   2392 	intel_set_memory_cxsr(dev_priv, false);
   2393 
    2394 	/* Calculate self-refresh entries for single plane configs */
   2395 	if (HAS_FW_BLC(dev_priv) && enabled) {
   2396 		/* self-refresh has much higher latency */
   2397 		static const int sr_latency_ns = 6000;
   2398 		const struct drm_display_mode *adjusted_mode =
   2399 			&enabled->config->hw.adjusted_mode;
   2400 		const struct drm_framebuffer *fb =
   2401 			enabled->base.primary->state->fb;
   2402 		int clock = adjusted_mode->crtc_clock;
   2403 		int htotal = adjusted_mode->crtc_htotal;
   2404 		int hdisplay = enabled->config->pipe_src_w;
   2405 		int cpp;
   2406 		int entries;
   2407 
   2408 		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
   2409 			cpp = 4;
   2410 		else
   2411 			cpp = fb->format->cpp[0];
   2412 
   2413 		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
   2414 					   sr_latency_ns / 100);
   2415 		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
   2416 		drm_dbg_kms(&dev_priv->drm,
   2417 			    "self-refresh entries: %d\n", entries);
   2418 		srwm = wm_info->fifo_size - entries;
   2419 		if (srwm < 0)
   2420 			srwm = 1;
   2421 
   2422 		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
   2423 			I915_WRITE(FW_BLC_SELF,
   2424 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
   2425 		else
   2426 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
   2427 	}
   2428 
   2429 	drm_dbg_kms(&dev_priv->drm,
   2430 		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
   2431 		     planea_wm, planeb_wm, cwm, srwm);
   2432 
   2433 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
   2434 	fwater_hi = (cwm & 0x1f);
   2435 
   2436 	/* Set request length to 8 cachelines per fetch */
   2437 	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
   2438 	fwater_hi = fwater_hi | (1 << 8);
   2439 
   2440 	I915_WRITE(FW_BLC, fwater_lo);
   2441 	I915_WRITE(FW_BLC2, fwater_hi);
   2442 
   2443 	if (enabled)
   2444 		intel_set_memory_cxsr(dev_priv, true);
   2445 }
   2446 
   2447 static void i845_update_wm(struct intel_crtc *unused_crtc)
   2448 {
   2449 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
   2450 	struct intel_crtc *crtc;
   2451 	const struct drm_display_mode *adjusted_mode;
   2452 	u32 fwater_lo;
   2453 	int planea_wm;
   2454 
   2455 	crtc = single_enabled_crtc(dev_priv);
   2456 	if (crtc == NULL)
   2457 		return;
   2458 
   2459 	adjusted_mode = &crtc->config->hw.adjusted_mode;
   2460 	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
   2461 				       &i845_wm_info,
   2462 				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
   2463 				       4, pessimal_latency_ns);
   2464 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
   2465 	fwater_lo |= (3<<8) | planea_wm;
   2466 
   2467 	drm_dbg_kms(&dev_priv->drm,
   2468 		    "Setting FIFO watermarks - A: %d\n", planea_wm);
   2469 
   2470 	I915_WRITE(FW_BLC, fwater_lo);
   2471 }
   2472 
   2473 /* latency must be in 0.1us units. */
   2474 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
   2475 				   unsigned int cpp,
   2476 				   unsigned int latency)
   2477 {
   2478 	unsigned int ret;
   2479 
   2480 	ret = intel_wm_method1(pixel_rate, cpp, latency);
   2481 	ret = DIV_ROUND_UP(ret, 64) + 2;
   2482 
   2483 	return ret;
   2484 }
   2485 
   2486 /* latency must be in 0.1us units. */
   2487 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
   2488 				   unsigned int htotal,
   2489 				   unsigned int width,
   2490 				   unsigned int cpp,
   2491 				   unsigned int latency)
   2492 {
   2493 	unsigned int ret;
   2494 
   2495 	ret = intel_wm_method2(pixel_rate, htotal,
   2496 			       width, cpp, latency);
   2497 	ret = DIV_ROUND_UP(ret, 64) + 2;
   2498 
   2499 	return ret;
   2500 }
   2501 
   2502 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
   2503 {
   2504 	/*
   2505 	 * Neither of these should be possible since this function shouldn't be
   2506 	 * called if the CRTC is off or the plane is invisible.  But let's be
   2507 	 * extra paranoid to avoid a potential divide-by-zero if we screw up
   2508 	 * elsewhere in the driver.
   2509 	 */
   2510 	if (WARN_ON(!cpp))
   2511 		return 0;
   2512 	if (WARN_ON(!horiz_pixels))
   2513 		return 0;
   2514 
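         	/*
         	 * pri_val is in 64 byte cachelines; converting to bytes and
         	 * dividing by the bytes per line gives the FBC watermark in
         	 * lines, plus a guard of 2.
         	 */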
   2515 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
   2516 }
   2517 
   2518 struct ilk_wm_maximums {
   2519 	u16 pri;
   2520 	u16 spr;
   2521 	u16 cur;
   2522 	u16 fbc;
   2523 };
   2524 
   2525 /*
   2526  * For both WM_PIPE and WM_LP.
   2527  * mem_value must be in 0.1us units.
   2528  */
   2529 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
   2530 			      const struct intel_plane_state *plane_state,
   2531 			      u32 mem_value, bool is_lp)
   2532 {
   2533 	u32 method1, method2;
   2534 	int cpp;
   2535 
   2536 	if (mem_value == 0)
   2537 		return U32_MAX;
   2538 
   2539 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   2540 		return 0;
   2541 
   2542 	cpp = plane_state->hw.fb->format->cpp[0];
   2543 
   2544 	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
   2545 
   2546 	if (!is_lp)
   2547 		return method1;
   2548 
   2549 	method2 = ilk_wm_method2(crtc_state->pixel_rate,
   2550 				 crtc_state->hw.adjusted_mode.crtc_htotal,
   2551 				 drm_rect_width(&plane_state->uapi.dst),
   2552 				 cpp, mem_value);
   2553 
   2554 	return min(method1, method2);
   2555 }
   2556 
   2557 /*
   2558  * For both WM_PIPE and WM_LP.
   2559  * mem_value must be in 0.1us units.
   2560  */
   2561 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
   2562 			      const struct intel_plane_state *plane_state,
   2563 			      u32 mem_value)
   2564 {
   2565 	u32 method1, method2;
   2566 	int cpp;
   2567 
   2568 	if (mem_value == 0)
   2569 		return U32_MAX;
   2570 
   2571 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   2572 		return 0;
   2573 
   2574 	cpp = plane_state->hw.fb->format->cpp[0];
   2575 
   2576 	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
   2577 	method2 = ilk_wm_method2(crtc_state->pixel_rate,
   2578 				 crtc_state->hw.adjusted_mode.crtc_htotal,
   2579 				 drm_rect_width(&plane_state->uapi.dst),
   2580 				 cpp, mem_value);
   2581 	return min(method1, method2);
   2582 }
   2583 
   2584 /*
   2585  * For both WM_PIPE and WM_LP.
   2586  * mem_value must be in 0.1us units.
   2587  */
   2588 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
   2589 			      const struct intel_plane_state *plane_state,
   2590 			      u32 mem_value)
   2591 {
   2592 	int cpp;
   2593 
   2594 	if (mem_value == 0)
   2595 		return U32_MAX;
   2596 
   2597 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   2598 		return 0;
   2599 
   2600 	cpp = plane_state->hw.fb->format->cpp[0];
   2601 
   2602 	return ilk_wm_method2(crtc_state->pixel_rate,
   2603 			      crtc_state->hw.adjusted_mode.crtc_htotal,
   2604 			      drm_rect_width(&plane_state->uapi.dst),
   2605 			      cpp, mem_value);
   2606 }
   2607 
   2608 /* Only for WM_LP. */
   2609 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
   2610 			      const struct intel_plane_state *plane_state,
   2611 			      u32 pri_val)
   2612 {
   2613 	int cpp;
   2614 
   2615 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   2616 		return 0;
   2617 
   2618 	cpp = plane_state->hw.fb->format->cpp[0];
   2619 
   2620 	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
   2621 			  cpp);
   2622 }
   2623 
   2624 static unsigned int
   2625 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
   2626 {
   2627 	if (INTEL_GEN(dev_priv) >= 8)
   2628 		return 3072;
   2629 	else if (INTEL_GEN(dev_priv) >= 7)
   2630 		return 768;
   2631 	else
   2632 		return 512;
   2633 }
   2634 
   2635 static unsigned int
   2636 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
   2637 		     int level, bool is_sprite)
   2638 {
   2639 	if (INTEL_GEN(dev_priv) >= 8)
   2640 		/* BDW primary/sprite plane watermarks */
   2641 		return level == 0 ? 255 : 2047;
   2642 	else if (INTEL_GEN(dev_priv) >= 7)
   2643 		/* IVB/HSW primary/sprite plane watermarks */
   2644 		return level == 0 ? 127 : 1023;
   2645 	else if (!is_sprite)
   2646 		/* ILK/SNB primary plane watermarks */
   2647 		return level == 0 ? 127 : 511;
   2648 	else
   2649 		/* ILK/SNB sprite plane watermarks */
   2650 		return level == 0 ? 63 : 255;
   2651 }
   2652 
   2653 static unsigned int
   2654 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
   2655 {
   2656 	if (INTEL_GEN(dev_priv) >= 7)
   2657 		return level == 0 ? 63 : 255;
   2658 	else
   2659 		return level == 0 ? 31 : 63;
   2660 }
   2661 
   2662 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
   2663 {
   2664 	if (INTEL_GEN(dev_priv) >= 8)
   2665 		return 31;
   2666 	else
   2667 		return 15;
   2668 }
   2669 
   2670 /* Calculate the maximum primary/sprite plane watermark */
   2671 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
   2672 				     int level,
   2673 				     const struct intel_wm_config *config,
   2674 				     enum intel_ddb_partitioning ddb_partitioning,
   2675 				     bool is_sprite)
   2676 {
   2677 	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
   2678 
   2679 	/* if sprites aren't enabled, sprites get nothing */
   2680 	if (is_sprite && !config->sprites_enabled)
   2681 		return 0;
   2682 
   2683 	/* HSW allows LP1+ watermarks even with multiple pipes */
   2684 	if (level == 0 || config->num_pipes_active > 1) {
   2685 		fifo_size /= INTEL_NUM_PIPES(dev_priv);
   2686 
   2687 		/*
   2688 		 * For some reason the non self refresh
   2689 		 * FIFO size is only half of the self
   2690 		 * refresh FIFO size on ILK/SNB.
   2691 		 */
   2692 		if (INTEL_GEN(dev_priv) <= 6)
   2693 			fifo_size /= 2;
   2694 	}
   2695 
   2696 	if (config->sprites_enabled) {
   2697 		/* level 0 is always calculated with 1:1 split */
   2698 		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
   2699 			if (is_sprite)
   2700 				fifo_size *= 5;
   2701 			fifo_size /= 6;
   2702 		} else {
   2703 			fifo_size /= 2;
   2704 		}
   2705 	}
   2706 
   2707 	/* clamp to max that the registers can hold */
   2708 	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
   2709 }
   2710 
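         /*
          * Illustrative example (not from the original source): on IVB
          * (768 entry FIFO) with one active pipe and sprites enabled, an
          * LP1+ level with the 5:6 DDB split gives the sprite
          * 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128, both
          * within the 1023 entry register limit.
          */
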
   2711 /* Calculate the maximum cursor plane watermark */
   2712 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
   2713 				      int level,
   2714 				      const struct intel_wm_config *config)
   2715 {
   2716 	/* HSW LP1+ watermarks w/ multiple pipes */
   2717 	if (level > 0 && config->num_pipes_active > 1)
   2718 		return 64;
   2719 
   2720 	/* otherwise just report max that registers can hold */
   2721 	return ilk_cursor_wm_reg_max(dev_priv, level);
   2722 }
   2723 
   2724 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
   2725 				    int level,
   2726 				    const struct intel_wm_config *config,
   2727 				    enum intel_ddb_partitioning ddb_partitioning,
   2728 				    struct ilk_wm_maximums *max)
   2729 {
   2730 	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
   2731 	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
   2732 	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
   2733 	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
   2734 }
   2735 
   2736 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
   2737 					int level,
   2738 					struct ilk_wm_maximums *max)
   2739 {
   2740 	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
   2741 	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
   2742 	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
   2743 	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
   2744 }
   2745 
   2746 static bool ilk_validate_wm_level(int level,
   2747 				  const struct ilk_wm_maximums *max,
   2748 				  struct intel_wm_level *result)
   2749 {
   2750 	bool ret;
   2751 
   2752 	/* already determined to be invalid? */
   2753 	if (!result->enable)
   2754 		return false;
   2755 
   2756 	result->enable = result->pri_val <= max->pri &&
   2757 			 result->spr_val <= max->spr &&
   2758 			 result->cur_val <= max->cur;
   2759 
   2760 	ret = result->enable;
   2761 
   2762 	/*
   2763 	 * HACK until we can pre-compute everything,
   2764 	 * and thus fail gracefully if LP0 watermarks
   2765 	 * are exceeded...
   2766 	 */
   2767 	if (level == 0 && !result->enable) {
   2768 		if (result->pri_val > max->pri)
   2769 			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
   2770 				      level, result->pri_val, max->pri);
   2771 		if (result->spr_val > max->spr)
   2772 			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
   2773 				      level, result->spr_val, max->spr);
   2774 		if (result->cur_val > max->cur)
   2775 			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
   2776 				      level, result->cur_val, max->cur);
   2777 
   2778 		result->pri_val = min_t(u32, result->pri_val, max->pri);
   2779 		result->spr_val = min_t(u32, result->spr_val, max->spr);
   2780 		result->cur_val = min_t(u32, result->cur_val, max->cur);
   2781 		result->enable = true;
   2782 	}
   2783 
   2784 	return ret;
   2785 }
   2786 
   2787 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
   2788 				 const struct intel_crtc *intel_crtc,
   2789 				 int level,
   2790 				 struct intel_crtc_state *crtc_state,
   2791 				 const struct intel_plane_state *pristate,
   2792 				 const struct intel_plane_state *sprstate,
   2793 				 const struct intel_plane_state *curstate,
   2794 				 struct intel_wm_level *result)
   2795 {
   2796 	u16 pri_latency = dev_priv->wm.pri_latency[level];
   2797 	u16 spr_latency = dev_priv->wm.spr_latency[level];
   2798 	u16 cur_latency = dev_priv->wm.cur_latency[level];
   2799 
   2800 	/* WM1+ latency values stored in 0.5us units */
   2801 	if (level > 0) {
   2802 		pri_latency *= 5;
   2803 		spr_latency *= 5;
   2804 		cur_latency *= 5;
   2805 	}
   2806 
   2807 	if (pristate) {
   2808 		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
   2809 						     pri_latency, level);
   2810 		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
   2811 	}
   2812 
   2813 	if (sprstate)
   2814 		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
   2815 
   2816 	if (curstate)
   2817 		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
   2818 
   2819 	result->enable = true;
   2820 }
   2821 
   2822 static u32
   2823 hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
   2824 {
   2825 	const struct intel_atomic_state *intel_state =
   2826 		to_intel_atomic_state(crtc_state->uapi.state);
   2827 	const struct drm_display_mode *adjusted_mode =
   2828 		&crtc_state->hw.adjusted_mode;
   2829 	u32 linetime, ips_linetime;
   2830 
   2831 	if (!crtc_state->hw.active)
   2832 		return 0;
   2833 	if (WARN_ON(adjusted_mode->crtc_clock == 0))
   2834 		return 0;
   2835 	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
   2836 		return 0;
   2837 
    2838 	/* The WM are computed based on how long it takes to fill a single
    2839 	 * row at the given clock rate, multiplied by 8. With crtc_clock in
    2840 	 * kHz this yields a linetime in 0.125 us units. */
   2841 	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
   2842 				     adjusted_mode->crtc_clock);
   2843 	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
   2844 					 intel_state->cdclk.logical.cdclk);
   2845 
   2846 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
   2847 	       PIPE_WM_LINETIME_TIME(linetime);
   2848 }
   2849 
   2850 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
   2851 				  u16 wm[8])
   2852 {
   2853 	struct intel_uncore *uncore = &dev_priv->uncore;
   2854 
   2855 	if (INTEL_GEN(dev_priv) >= 9) {
   2856 		u32 val;
   2857 		int ret, i;
   2858 		int level, max_level = ilk_wm_max_level(dev_priv);
   2859 
   2860 		/* read the first set of memory latencies[0:3] */
   2861 		val = 0; /* data0 to be programmed to 0 for first set */
   2862 		ret = sandybridge_pcode_read(dev_priv,
   2863 					     GEN9_PCODE_READ_MEM_LATENCY,
   2864 					     &val, NULL);
   2865 
   2866 		if (ret) {
   2867 			drm_err(&dev_priv->drm,
   2868 				"SKL Mailbox read error = %d\n", ret);
   2869 			return;
   2870 		}
   2871 
   2872 		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
   2873 		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
   2874 				GEN9_MEM_LATENCY_LEVEL_MASK;
   2875 		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
   2876 				GEN9_MEM_LATENCY_LEVEL_MASK;
   2877 		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
   2878 				GEN9_MEM_LATENCY_LEVEL_MASK;
   2879 
   2880 		/* read the second set of memory latencies[4:7] */
   2881 		val = 1; /* data0 to be programmed to 1 for second set */
   2882 		ret = sandybridge_pcode_read(dev_priv,
   2883 					     GEN9_PCODE_READ_MEM_LATENCY,
   2884 					     &val, NULL);
   2885 		if (ret) {
   2886 			drm_err(&dev_priv->drm,
   2887 				"SKL Mailbox read error = %d\n", ret);
   2888 			return;
   2889 		}
   2890 
   2891 		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
   2892 		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
   2893 				GEN9_MEM_LATENCY_LEVEL_MASK;
   2894 		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
   2895 				GEN9_MEM_LATENCY_LEVEL_MASK;
   2896 		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
   2897 				GEN9_MEM_LATENCY_LEVEL_MASK;
   2898 
   2899 		/*
    2900 		 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
   2901 		 * need to be disabled. We make sure to sanitize the values out
   2902 		 * of the punit to satisfy this requirement.
   2903 		 */
   2904 		for (level = 1; level <= max_level; level++) {
   2905 			if (wm[level] == 0) {
   2906 				for (i = level + 1; i <= max_level; i++)
   2907 					wm[i] = 0;
   2908 				break;
   2909 			}
   2910 		}
   2911 
   2912 		/*
   2913 		 * WaWmMemoryReadLatency:skl+,glk
   2914 		 *
   2915 		 * punit doesn't take into account the read latency so we need
   2916 		 * to add 2us to the various latency levels we retrieve from the
    2917 		 * punit when the level 0 response data is 0us.
   2918 		 */
   2919 		if (wm[0] == 0) {
   2920 			wm[0] += 2;
   2921 			for (level = 1; level <= max_level; level++) {
   2922 				if (wm[level] == 0)
   2923 					break;
   2924 				wm[level] += 2;
   2925 			}
   2926 		}
   2927 
   2928 		/*
   2929 		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
    2930 		 * If the DIMM info could not be read, assume a 16GB DIMM
    2931 		 * so that this WA is still applied and any potential
    2932 		 * underrun is avoided.
   2933 		 */
   2934 		if (dev_priv->dram_info.is_16gb_dimm)
   2935 			wm[0] += 1;
   2936 
   2937 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
   2938 		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
   2939 
   2940 		wm[0] = (sskpd >> 56) & 0xFF;
   2941 		if (wm[0] == 0)
   2942 			wm[0] = sskpd & 0xF;
   2943 		wm[1] = (sskpd >> 4) & 0xFF;
   2944 		wm[2] = (sskpd >> 12) & 0xFF;
   2945 		wm[3] = (sskpd >> 20) & 0x1FF;
   2946 		wm[4] = (sskpd >> 32) & 0x1FF;
   2947 	} else if (INTEL_GEN(dev_priv) >= 6) {
   2948 		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
   2949 
   2950 		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
   2951 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
   2952 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
   2953 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
   2954 	} else if (INTEL_GEN(dev_priv) >= 5) {
   2955 		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
   2956 
   2957 		/* ILK primary LP0 latency is 700 ns */
   2958 		wm[0] = 7;
   2959 		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
   2960 		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
   2961 	} else {
   2962 		MISSING_CASE(INTEL_DEVID(dev_priv));
   2963 	}
   2964 }
   2965 
   2966 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
   2967 				       u16 wm[5])
   2968 {
   2969 	/* ILK sprite LP0 latency is 1300 ns */
   2970 	if (IS_GEN(dev_priv, 5))
   2971 		wm[0] = 13;
   2972 }
   2973 
   2974 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
   2975 				       u16 wm[5])
   2976 {
   2977 	/* ILK cursor LP0 latency is 1300 ns */
   2978 	if (IS_GEN(dev_priv, 5))
   2979 		wm[0] = 13;
   2980 }
   2981 
   2982 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
   2983 {
   2984 	/* how many WM levels are we expecting */
   2985 	if (INTEL_GEN(dev_priv) >= 9)
   2986 		return 7;
   2987 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
   2988 		return 4;
   2989 	else if (INTEL_GEN(dev_priv) >= 6)
   2990 		return 3;
   2991 	else
   2992 		return 2;
   2993 }
   2994 
   2995 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
   2996 				   const char *name,
   2997 				   const u16 wm[8])
   2998 {
   2999 	int level, max_level = ilk_wm_max_level(dev_priv);
   3000 
   3001 	for (level = 0; level <= max_level; level++) {
   3002 		unsigned int latency = wm[level];
   3003 
   3004 		if (latency == 0) {
   3005 			drm_dbg_kms(&dev_priv->drm,
   3006 				    "%s WM%d latency not provided\n",
   3007 				    name, level);
   3008 			continue;
   3009 		}
   3010 
   3011 		/*
   3012 		 * - latencies are in us on gen9.
   3013 		 * - before then, WM1+ latency values are in 0.5us units
   3014 		 */
   3015 		if (INTEL_GEN(dev_priv) >= 9)
   3016 			latency *= 10;
   3017 		else if (level > 0)
   3018 			latency *= 5;
   3019 
   3020 		drm_dbg_kms(&dev_priv->drm,
   3021 			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
   3022 			    wm[level], latency / 10, latency % 10);
   3023 	}
   3024 }
   3025 
   3026 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
   3027 				    u16 wm[5], u16 min)
   3028 {
   3029 	int level, max_level = ilk_wm_max_level(dev_priv);
   3030 
   3031 	if (wm[0] >= min)
   3032 		return false;
   3033 
   3034 	wm[0] = max(wm[0], min);
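         	/*
         	 * WM0 latency is in 0.1us units while WM1+ latencies are
         	 * stored in 0.5us units, hence the DIV_ROUND_UP(min, 5).
         	 */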
   3035 	for (level = 1; level <= max_level; level++)
   3036 		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
   3037 
   3038 	return true;
   3039 }
   3040 
   3041 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
   3042 {
   3043 	bool changed;
   3044 
   3045 	/*
   3046 	 * The BIOS provided WM memory latency values are often
   3047 	 * inadequate for high resolution displays. Adjust them.
   3048 	 */
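         	/*
         	 * A minimum of 12 is 1.2us in WM0's 0.1us units;
         	 * ilk_increase_wm_latency() converts it to
         	 * DIV_ROUND_UP(12, 5) = 3 (i.e. 1.5us) for the WM1+ levels,
         	 * which use 0.5us units.
         	 */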
   3049 	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
   3050 		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
   3051 		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
   3052 
   3053 	if (!changed)
   3054 		return;
   3055 
   3056 	drm_dbg_kms(&dev_priv->drm,
   3057 		    "WM latency values increased to avoid potential underruns\n");
   3058 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
   3059 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
   3060 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
   3061 }
   3062 
   3063 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
   3064 {
   3065 	/*
   3066 	 * On some SNB machines (Thinkpad X220 Tablet at least)
   3067 	 * LP3 usage can cause vblank interrupts to be lost.
   3068 	 * The DEIIR bit will go high but it looks like the CPU
   3069 	 * never gets interrupted.
   3070 	 *
    3071 	 * It's not clear whether other interrupt sources could
   3072 	 * be affected or if this is somehow limited to vblank
   3073 	 * interrupts only. To play it safe we disable LP3
   3074 	 * watermarks entirely.
   3075 	 */
   3076 	if (dev_priv->wm.pri_latency[3] == 0 &&
   3077 	    dev_priv->wm.spr_latency[3] == 0 &&
   3078 	    dev_priv->wm.cur_latency[3] == 0)
   3079 		return;
   3080 
   3081 	dev_priv->wm.pri_latency[3] = 0;
   3082 	dev_priv->wm.spr_latency[3] = 0;
   3083 	dev_priv->wm.cur_latency[3] = 0;
   3084 
   3085 	drm_dbg_kms(&dev_priv->drm,
   3086 		    "LP3 watermarks disabled due to potential for lost interrupts\n");
   3087 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
   3088 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
   3089 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
   3090 }
   3091 
   3092 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
   3093 {
   3094 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
   3095 
   3096 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
   3097 	       sizeof(dev_priv->wm.pri_latency));
   3098 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
   3099 	       sizeof(dev_priv->wm.pri_latency));
   3100 
   3101 	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
   3102 	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
   3103 
   3104 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
   3105 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
   3106 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
   3107 
   3108 	if (IS_GEN(dev_priv, 6)) {
   3109 		snb_wm_latency_quirk(dev_priv);
   3110 		snb_wm_lp3_irq_quirk(dev_priv);
   3111 	}
   3112 }
   3113 
   3114 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
   3115 {
   3116 	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
   3117 	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
   3118 }
   3119 
   3120 static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
   3121 				 struct intel_pipe_wm *pipe_wm)
   3122 {
   3123 	/* LP0 watermark maximums depend on this pipe alone */
   3124 	const struct intel_wm_config config = {
   3125 		.num_pipes_active = 1,
   3126 		.sprites_enabled = pipe_wm->sprites_enabled,
   3127 		.sprites_scaled = pipe_wm->sprites_scaled,
   3128 	};
   3129 	struct ilk_wm_maximums max;
   3130 
   3131 	/* LP0 watermarks always use 1/2 DDB partitioning */
   3132 	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
   3133 
   3134 	/* At least LP0 must be valid */
   3135 	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
   3136 		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
   3137 		return false;
   3138 	}
   3139 
   3140 	return true;
   3141 }
   3142 
   3143 /* Compute new watermarks for the pipe */
   3144 static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
   3145 {
   3146 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   3147 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
   3148 	struct intel_pipe_wm *pipe_wm;
   3149 	struct intel_plane *plane;
   3150 	const struct intel_plane_state *plane_state;
   3151 	const struct intel_plane_state *pristate = NULL;
   3152 	const struct intel_plane_state *sprstate = NULL;
   3153 	const struct intel_plane_state *curstate = NULL;
   3154 	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
   3155 	struct ilk_wm_maximums max;
   3156 
   3157 	pipe_wm = &crtc_state->wm.ilk.optimal;
   3158 
   3159 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
   3160 		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
   3161 			pristate = plane_state;
   3162 		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
   3163 			sprstate = plane_state;
   3164 		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
   3165 			curstate = plane_state;
   3166 	}
   3167 
   3168 	pipe_wm->pipe_enabled = crtc_state->hw.active;
   3169 	if (sprstate) {
   3170 		pipe_wm->sprites_enabled = sprstate->uapi.visible;
   3171 		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
   3172 			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
   3173 			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
   3174 	}
   3175 
   3176 	usable_level = max_level;
   3177 
   3178 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
   3179 	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
   3180 		usable_level = 1;
   3181 
   3182 	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
   3183 	if (pipe_wm->sprites_scaled)
   3184 		usable_level = 0;
   3185 
   3186 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
   3187 	ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
   3188 			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
   3189 
   3190 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
   3191 		pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
   3192 
   3193 	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
   3194 		return -EINVAL;
   3195 
   3196 	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
   3197 
   3198 	for (level = 1; level <= usable_level; level++) {
   3199 		struct intel_wm_level *wm = &pipe_wm->wm[level];
   3200 
   3201 		ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
   3202 				     pristate, sprstate, curstate, wm);
   3203 
   3204 		/*
   3205 		 * Disable any watermark level that exceeds the
   3206 		 * register maximums since such watermarks are
   3207 		 * always invalid.
   3208 		 */
   3209 		if (!ilk_validate_wm_level(level, &max, wm)) {
   3210 			memset(wm, 0, sizeof(*wm));
   3211 			break;
   3212 		}
   3213 	}
   3214 
   3215 	return 0;
   3216 }
   3217 
   3218 /*
   3219  * Build a set of 'intermediate' watermark values that satisfy both the old
   3220  * state and the new state.  These can be programmed to the hardware
   3221  * immediately.
   3222  */
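         /*
          * Illustrative example: if the old optimal WM0 pri_val is 8 and the new
          * one is 5, the intermediate WM keeps max(8, 5) = 8, which is safe both
          * before and after the vblank that latches the new state.
          */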
   3223 static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
   3224 {
   3225 	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
   3226 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
   3227 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
   3228 	struct intel_atomic_state *intel_state =
   3229 		to_intel_atomic_state(newstate->uapi.state);
   3230 	const struct intel_crtc_state *oldstate =
   3231 		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
   3232 	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
   3233 	int level, max_level = ilk_wm_max_level(dev_priv);
   3234 
   3235 	/*
   3236 	 * Start with the final, target watermarks, then combine with the
   3237 	 * currently active watermarks to get values that are safe both before
   3238 	 * and after the vblank.
   3239 	 */
   3240 	*a = newstate->wm.ilk.optimal;
   3241 	if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
   3242 	    intel_state->skip_intermediate_wm)
   3243 		return 0;
   3244 
   3245 	a->pipe_enabled |= b->pipe_enabled;
   3246 	a->sprites_enabled |= b->sprites_enabled;
   3247 	a->sprites_scaled |= b->sprites_scaled;
   3248 
   3249 	for (level = 0; level <= max_level; level++) {
   3250 		struct intel_wm_level *a_wm = &a->wm[level];
   3251 		const struct intel_wm_level *b_wm = &b->wm[level];
   3252 
   3253 		a_wm->enable &= b_wm->enable;
   3254 		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
   3255 		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
   3256 		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
   3257 		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
   3258 	}
   3259 
   3260 	/*
   3261 	 * We need to make sure that these merged watermark values are
   3262 	 * actually a valid configuration themselves.  If they're not,
   3263 	 * there's no safe way to transition from the old state to
   3264 	 * the new state, so we need to fail the atomic transaction.
   3265 	 */
   3266 	if (!ilk_validate_pipe_wm(dev_priv, a))
   3267 		return -EINVAL;
   3268 
   3269 	/*
    3270 	 * If our intermediate WMs are identical to the final WMs, then we can
   3271 	 * omit the post-vblank programming; only update if it's different.
   3272 	 */
   3273 	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
   3274 		newstate->wm.need_postvbl_update = true;
   3275 
   3276 	return 0;
   3277 }
   3278 
   3279 /*
   3280  * Merge the watermarks from all active pipes for a specific level.
   3281  */
   3282 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
   3283 			       int level,
   3284 			       struct intel_wm_level *ret_wm)
   3285 {
   3286 	const struct intel_crtc *intel_crtc;
   3287 
   3288 	ret_wm->enable = true;
   3289 
   3290 	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
   3291 		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
   3292 		const struct intel_wm_level *wm = &active->wm[level];
   3293 
   3294 		if (!active->pipe_enabled)
   3295 			continue;
   3296 
   3297 		/*
   3298 		 * The watermark values may have been used in the past,
   3299 		 * so we must maintain them in the registers for some
   3300 		 * time even if the level is now disabled.
   3301 		 */
   3302 		if (!wm->enable)
   3303 			ret_wm->enable = false;
   3304 
   3305 		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
   3306 		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
   3307 		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
   3308 		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
   3309 	}
   3310 }
   3311 
   3312 /*
   3313  * Merge all low power watermarks for all active pipes.
   3314  */
   3315 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
   3316 			 const struct intel_wm_config *config,
   3317 			 const struct ilk_wm_maximums *max,
   3318 			 struct intel_pipe_wm *merged)
   3319 {
   3320 	int level, max_level = ilk_wm_max_level(dev_priv);
   3321 	int last_enabled_level = max_level;
   3322 
   3323 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
   3324 	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
   3325 	    config->num_pipes_active > 1)
   3326 		last_enabled_level = 0;
   3327 
   3328 	/* ILK: FBC WM must be disabled always */
   3329 	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
   3330 
   3331 	/* merge each WM1+ level */
   3332 	for (level = 1; level <= max_level; level++) {
   3333 		struct intel_wm_level *wm = &merged->wm[level];
   3334 
   3335 		ilk_merge_wm_level(dev_priv, level, wm);
   3336 
   3337 		if (level > last_enabled_level)
   3338 			wm->enable = false;
   3339 		else if (!ilk_validate_wm_level(level, max, wm))
   3340 			/* make sure all following levels get disabled */
   3341 			last_enabled_level = level - 1;
   3342 
   3343 		/*
   3344 		 * The spec says it is preferred to disable
   3345 		 * FBC WMs instead of disabling a WM level.
   3346 		 */
   3347 		if (wm->fbc_val > max->fbc) {
   3348 			if (wm->enable)
   3349 				merged->fbc_wm_enabled = false;
   3350 			wm->fbc_val = 0;
   3351 		}
   3352 	}
   3353 
   3354 	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
   3355 	/*
   3356 	 * FIXME this is racy. FBC might get enabled later.
   3357 	 * What we should check here is whether FBC can be
   3358 	 * enabled sometime later.
   3359 	 */
   3360 	if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
   3361 	    intel_fbc_is_active(dev_priv)) {
   3362 		for (level = 2; level <= max_level; level++) {
   3363 			struct intel_wm_level *wm = &merged->wm[level];
   3364 
   3365 			wm->enable = false;
   3366 		}
   3367 	}
   3368 }
   3369 
   3370 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
   3371 {
   3372 	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
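         	/*
         	 * E.g. when wm[4] exists and is enabled (HSW/BDW), LP2 and LP3
         	 * map to levels 3 and 4; otherwise LPn maps straight to level n.
         	 */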
   3373 	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
   3374 }
   3375 
   3376 /* The value we need to program into the WM_LPx latency field */
   3377 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
   3378 				      int level)
   3379 {
   3380 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
   3381 		return 2 * level;
   3382 	else
   3383 		return dev_priv->wm.pri_latency[level];
   3384 }
   3385 
   3386 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
   3387 				   const struct intel_pipe_wm *merged,
   3388 				   enum intel_ddb_partitioning partitioning,
   3389 				   struct ilk_wm_values *results)
   3390 {
   3391 	struct intel_crtc *intel_crtc;
   3392 	int level, wm_lp;
   3393 
   3394 	results->enable_fbc_wm = merged->fbc_wm_enabled;
   3395 	results->partitioning = partitioning;
   3396 
   3397 	/* LP1+ register values */
   3398 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
   3399 		const struct intel_wm_level *r;
   3400 
   3401 		level = ilk_wm_lp_to_level(wm_lp, merged);
   3402 
   3403 		r = &merged->wm[level];
   3404 
   3405 		/*
   3406 		 * Maintain the watermark values even if the level is
   3407 		 * disabled. Doing otherwise could cause underruns.
   3408 		 */
   3409 		results->wm_lp[wm_lp - 1] =
   3410 			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
   3411 			(r->pri_val << WM1_LP_SR_SHIFT) |
   3412 			r->cur_val;
   3413 
   3414 		if (r->enable)
   3415 			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
   3416 
   3417 		if (INTEL_GEN(dev_priv) >= 8)
   3418 			results->wm_lp[wm_lp - 1] |=
   3419 				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
   3420 		else
   3421 			results->wm_lp[wm_lp - 1] |=
   3422 				r->fbc_val << WM1_LP_FBC_SHIFT;
   3423 
   3424 		/*
   3425 		 * Always set WM1S_LP_EN when spr_val != 0, even if the
   3426 		 * level is disabled. Doing otherwise could cause underruns.
   3427 		 */
   3428 		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
   3429 			WARN_ON(wm_lp != 1);
   3430 			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
   3431 		} else
   3432 			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
   3433 	}
   3434 
   3435 	/* LP0 register values */
   3436 	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
   3437 		enum pipe pipe = intel_crtc->pipe;
   3438 		const struct intel_wm_level *r =
   3439 			&intel_crtc->wm.active.ilk.wm[0];
   3440 
   3441 		if (WARN_ON(!r->enable))
   3442 			continue;
   3443 
   3444 		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
   3445 
   3446 		results->wm_pipe[pipe] =
   3447 			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
   3448 			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
   3449 			r->cur_val;
   3450 	}
   3451 }
   3452 
    3453 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
    3454  * case both are at the same level. Prefer r1 if they're otherwise identical. */
   3455 static struct intel_pipe_wm *
   3456 ilk_find_best_result(struct drm_i915_private *dev_priv,
   3457 		     struct intel_pipe_wm *r1,
   3458 		     struct intel_pipe_wm *r2)
   3459 {
   3460 	int level, max_level = ilk_wm_max_level(dev_priv);
   3461 	int level1 = 0, level2 = 0;
   3462 
   3463 	for (level = 1; level <= max_level; level++) {
   3464 		if (r1->wm[level].enable)
   3465 			level1 = level;
   3466 		if (r2->wm[level].enable)
   3467 			level2 = level;
   3468 	}
   3469 
   3470 	if (level1 == level2) {
   3471 		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
   3472 			return r2;
   3473 		else
   3474 			return r1;
   3475 	} else if (level1 > level2) {
   3476 		return r1;
   3477 	} else {
   3478 		return r2;
   3479 	}
   3480 }
   3481 
   3482 /* dirty bits used to track which watermarks need changes */
   3483 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
   3484 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
   3485 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
   3486 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
   3487 #define WM_DIRTY_FBC (1 << 24)
   3488 #define WM_DIRTY_DDB (1 << 25)
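         /* e.g. WM_DIRTY_PIPE(PIPE_B) is bit 1, WM_DIRTY_LINETIME(PIPE_B) bit 9,
          * and WM_DIRTY_LP(3) bit 18, so the bit ranges stay disjoint. */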
   3489 
   3490 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
   3491 					 const struct ilk_wm_values *old,
   3492 					 const struct ilk_wm_values *new)
   3493 {
   3494 	unsigned int dirty = 0;
   3495 	enum pipe pipe;
   3496 	int wm_lp;
   3497 
   3498 	for_each_pipe(dev_priv, pipe) {
   3499 		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
   3500 			dirty |= WM_DIRTY_LINETIME(pipe);
   3501 			/* Must disable LP1+ watermarks too */
   3502 			dirty |= WM_DIRTY_LP_ALL;
   3503 		}
   3504 
   3505 		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
   3506 			dirty |= WM_DIRTY_PIPE(pipe);
   3507 			/* Must disable LP1+ watermarks too */
   3508 			dirty |= WM_DIRTY_LP_ALL;
   3509 		}
   3510 	}
   3511 
   3512 	if (old->enable_fbc_wm != new->enable_fbc_wm) {
   3513 		dirty |= WM_DIRTY_FBC;
   3514 		/* Must disable LP1+ watermarks too */
   3515 		dirty |= WM_DIRTY_LP_ALL;
   3516 	}
   3517 
   3518 	if (old->partitioning != new->partitioning) {
   3519 		dirty |= WM_DIRTY_DDB;
   3520 		/* Must disable LP1+ watermarks too */
   3521 		dirty |= WM_DIRTY_LP_ALL;
   3522 	}
   3523 
   3524 	/* LP1+ watermarks already deemed dirty, no need to continue */
   3525 	if (dirty & WM_DIRTY_LP_ALL)
   3526 		return dirty;
   3527 
   3528 	/* Find the lowest numbered LP1+ watermark in need of an update... */
   3529 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
   3530 		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
   3531 		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
   3532 			break;
   3533 	}
   3534 
   3535 	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
   3536 	for (; wm_lp <= 3; wm_lp++)
   3537 		dirty |= WM_DIRTY_LP(wm_lp);
   3538 
   3539 	return dirty;
   3540 }
   3541 
   3542 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
   3543 			       unsigned int dirty)
   3544 {
   3545 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
   3546 	bool changed = false;
   3547 
   3548 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
   3549 		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
   3550 		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
   3551 		changed = true;
   3552 	}
   3553 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
   3554 		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
   3555 		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
   3556 		changed = true;
   3557 	}
   3558 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
   3559 		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
   3560 		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
   3561 		changed = true;
   3562 	}
   3563 
   3564 	/*
   3565 	 * Don't touch WM1S_LP_EN here.
   3566 	 * Doing so could cause underruns.
   3567 	 */
   3568 
   3569 	return changed;
   3570 }
   3571 
   3572 /*
    3573  * The spec says we shouldn't write when we don't need to, because every write
   3574  * causes WMs to be re-evaluated, expending some power.
   3575  */
   3576 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
   3577 				struct ilk_wm_values *results)
   3578 {
   3579 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
   3580 	unsigned int dirty;
   3581 	u32 val;
   3582 
   3583 	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
   3584 	if (!dirty)
   3585 		return;
   3586 
   3587 	_ilk_disable_lp_wm(dev_priv, dirty);
   3588 
   3589 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
   3590 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
   3591 	if (dirty & WM_DIRTY_PIPE(PIPE_B))
   3592 		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
   3593 	if (dirty & WM_DIRTY_PIPE(PIPE_C))
   3594 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
   3595 
   3596 	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
   3597 		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
   3598 	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
   3599 		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
   3600 	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
   3601 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
   3602 
   3603 	if (dirty & WM_DIRTY_DDB) {
   3604 		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
   3605 			val = I915_READ(WM_MISC);
   3606 			if (results->partitioning == INTEL_DDB_PART_1_2)
   3607 				val &= ~WM_MISC_DATA_PARTITION_5_6;
   3608 			else
   3609 				val |= WM_MISC_DATA_PARTITION_5_6;
   3610 			I915_WRITE(WM_MISC, val);
   3611 		} else {
   3612 			val = I915_READ(DISP_ARB_CTL2);
   3613 			if (results->partitioning == INTEL_DDB_PART_1_2)
   3614 				val &= ~DISP_DATA_PARTITION_5_6;
   3615 			else
   3616 				val |= DISP_DATA_PARTITION_5_6;
   3617 			I915_WRITE(DISP_ARB_CTL2, val);
   3618 		}
   3619 	}
   3620 
   3621 	if (dirty & WM_DIRTY_FBC) {
   3622 		val = I915_READ(DISP_ARB_CTL);
   3623 		if (results->enable_fbc_wm)
   3624 			val &= ~DISP_FBC_WM_DIS;
   3625 		else
   3626 			val |= DISP_FBC_WM_DIS;
   3627 		I915_WRITE(DISP_ARB_CTL, val);
   3628 	}
   3629 
   3630 	if (dirty & WM_DIRTY_LP(1) &&
   3631 	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
   3632 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
   3633 
   3634 	if (INTEL_GEN(dev_priv) >= 7) {
   3635 		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
   3636 			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
   3637 		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
   3638 			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
   3639 	}
   3640 
   3641 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
   3642 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
   3643 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
   3644 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
   3645 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
   3646 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
   3647 
   3648 	dev_priv->wm.hw = *results;
   3649 }
   3650 
   3651 bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
   3652 {
   3653 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
   3654 }
   3655 
   3656 static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
   3657 {
   3658 	u8 enabled_slices;
   3659 
   3660 	/* Slice 1 will always be enabled */
   3661 	enabled_slices = 1;
   3662 
    3663 	/* Platforms prior to GEN11 have only one DBuf slice */
   3664 	if (INTEL_GEN(dev_priv) < 11)
   3665 		return enabled_slices;
   3666 
   3667 	/*
   3668 	 * FIXME: for now we'll only ever use 1 slice; pretend that we have
   3669 	 * only that 1 slice enabled until we have a proper way for on-demand
   3670 	 * toggling of the second slice.
   3671 	 */
   3672 	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
   3673 		enabled_slices++;
   3674 
   3675 	return enabled_slices;
   3676 }
   3677 
   3678 /*
    3679  * FIXME: We still don't have the proper code to detect if we need to apply the WA,
   3680  * so assume we'll always need it in order to avoid underruns.
   3681  */
   3682 static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
   3683 {
   3684 	return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
   3685 }
   3686 
   3687 static bool
   3688 intel_has_sagv(struct drm_i915_private *dev_priv)
   3689 {
   3690 	/* HACK! */
   3691 	if (IS_GEN(dev_priv, 12))
   3692 		return false;
   3693 
   3694 	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
   3695 		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
   3696 }
   3697 
   3698 static void
   3699 skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
   3700 {
   3701 	if (INTEL_GEN(dev_priv) >= 12) {
   3702 		u32 val = 0;
   3703 		int ret;
   3704 
   3705 		ret = sandybridge_pcode_read(dev_priv,
   3706 					     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
   3707 					     &val, NULL);
   3708 		if (!ret) {
   3709 			dev_priv->sagv_block_time_us = val;
   3710 			return;
   3711 		}
   3712 
   3713 		drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
   3714 	} else if (IS_GEN(dev_priv, 11)) {
   3715 		dev_priv->sagv_block_time_us = 10;
   3716 		return;
   3717 	} else if (IS_GEN(dev_priv, 10)) {
   3718 		dev_priv->sagv_block_time_us = 20;
   3719 		return;
   3720 	} else if (IS_GEN(dev_priv, 9)) {
   3721 		dev_priv->sagv_block_time_us = 30;
   3722 		return;
   3723 	} else {
   3724 		MISSING_CASE(INTEL_GEN(dev_priv));
   3725 	}
   3726 
   3727 	/* Default to an unusable block time */
   3728 	dev_priv->sagv_block_time_us = -1;
   3729 }
   3730 
   3731 /*
   3732  * SAGV dynamically adjusts the system agent voltage and clock frequencies
   3733  * depending on power and performance requirements. The display engine access
   3734  * to system memory is blocked during the adjustment time. Because of the
   3735  * blocking time, having this enabled can cause full system hangs and/or pipe
   3736  * underruns if we don't meet all of the following requirements:
   3737  *
   3738  *  - <= 1 pipe enabled
   3739  *  - All planes can enable watermarks for latencies >= SAGV engine block time
   3740  *  - We're not using an interlaced display configuration
   3741  */
   3742 int
   3743 intel_enable_sagv(struct drm_i915_private *dev_priv)
   3744 {
   3745 	int ret;
   3746 
   3747 	if (!intel_has_sagv(dev_priv))
   3748 		return 0;
   3749 
   3750 	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
   3751 		return 0;
   3752 
   3753 	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
   3754 	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
   3755 				      GEN9_SAGV_ENABLE);
   3756 
   3757 	/* We don't need to wait for SAGV when enabling */
   3758 
   3759 	/*
   3760 	 * Some skl systems, pre-release machines in particular,
   3761 	 * don't actually have SAGV.
   3762 	 */
   3763 	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
   3764 		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
   3765 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
   3766 		return 0;
   3767 	} else if (ret < 0) {
   3768 		drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
   3769 		return ret;
   3770 	}
   3771 
   3772 	dev_priv->sagv_status = I915_SAGV_ENABLED;
   3773 	return 0;
   3774 }
   3775 
   3776 int
   3777 intel_disable_sagv(struct drm_i915_private *dev_priv)
   3778 {
   3779 	int ret;
   3780 
   3781 	if (!intel_has_sagv(dev_priv))
   3782 		return 0;
   3783 
   3784 	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
   3785 		return 0;
   3786 
   3787 	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
   3788 	/* bspec says to keep retrying for at least 1 ms */
   3789 	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
   3790 				GEN9_SAGV_DISABLE,
   3791 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
   3792 				1);
   3793 	/*
   3794 	 * Some skl systems, pre-release machines in particular,
   3795 	 * don't actually have SAGV.
   3796 	 */
   3797 	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
   3798 		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
   3799 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
   3800 		return 0;
   3801 	} else if (ret < 0) {
   3802 		drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
   3803 		return ret;
   3804 	}
   3805 
   3806 	dev_priv->sagv_status = I915_SAGV_DISABLED;
   3807 	return 0;
   3808 }
   3809 
   3810 bool intel_can_enable_sagv(struct intel_atomic_state *state)
   3811 {
   3812 	struct drm_device *dev = state->base.dev;
   3813 	struct drm_i915_private *dev_priv = to_i915(dev);
   3814 	struct intel_crtc *crtc;
   3815 	struct intel_plane *plane;
   3816 	struct intel_crtc_state *crtc_state;
   3817 	enum pipe pipe;
   3818 	int level, latency;
   3819 
   3820 	if (!intel_has_sagv(dev_priv))
   3821 		return false;
   3822 
   3823 	/*
   3824 	 * If there are no active CRTCs, no additional checks need be performed
   3825 	 */
   3826 	if (hweight8(state->active_pipes) == 0)
   3827 		return true;
   3828 
   3829 	/*
   3830 	 * SKL+ workaround: bspec recommends we disable SAGV when we have
    3831 	 * more than one pipe enabled
   3832 	 */
   3833 	if (hweight8(state->active_pipes) > 1)
   3834 		return false;
   3835 
   3836 	/* Since we're now guaranteed to only have one active CRTC... */
   3837 	pipe = ffs(state->active_pipes) - 1;
   3838 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
   3839 	crtc_state = to_intel_crtc_state(crtc->base.state);
   3840 
   3841 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
   3842 		return false;
   3843 
   3844 	for_each_intel_plane_on_crtc(dev, crtc, plane) {
   3845 		struct skl_plane_wm *wm =
   3846 			&crtc_state->wm.skl.optimal.planes[plane->id];
   3847 
   3848 		/* Skip this plane if it's not enabled */
   3849 		if (!wm->wm[0].plane_en)
   3850 			continue;
   3851 
   3852 		/* Find the highest enabled wm level for this plane */
   3853 		for (level = ilk_wm_max_level(dev_priv);
   3854 		     !wm->wm[level].plane_en; --level)
   3855 		     { }
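         		/*
         		 * The loop above terminates because wm[0].plane_en was
         		 * checked just before it.
         		 */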
   3856 
   3857 		latency = dev_priv->wm.skl_latency[level];
   3858 
   3859 		if (skl_needs_memory_bw_wa(dev_priv) &&
   3860 		    plane->base.state->fb->modifier ==
   3861 		    I915_FORMAT_MOD_X_TILED)
   3862 			latency += 15;
   3863 
   3864 		/*
    3865 		 * If any plane on this pipe can't enable a wm level covering
    3866 		 * a memory latency of at least sagv_block_time_us, we
    3867 		 * can't enable SAGV.
   3868 		 */
   3869 		if (latency < dev_priv->sagv_block_time_us)
   3870 			return false;
   3871 	}
   3872 
   3873 	return true;
   3874 }
   3875 
   3876 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
   3877 			      const struct intel_crtc_state *crtc_state,
   3878 			      const u64 total_data_rate,
   3879 			      const int num_active,
   3880 			      struct skl_ddb_allocation *ddb)
   3881 {
   3882 	const struct drm_display_mode *adjusted_mode;
   3883 	u64 total_data_bw;
   3884 	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
   3885 
   3886 	WARN_ON(ddb_size == 0);
   3887 
   3888 	if (INTEL_GEN(dev_priv) < 11)
   3889 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
   3890 
   3891 	adjusted_mode = &crtc_state->hw.adjusted_mode;
   3892 	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
   3893 
   3894 	/*
    3895 	 * 12 GB/s is the maximum BW supported by a single DBuf slice.
   3896 	 *
   3897 	 * FIXME dbuf slice code is broken:
   3898 	 * - must wait for planes to stop using the slice before powering it off
   3899 	 * - plane straddling both slices is illegal in multi-pipe scenarios
   3900 	 * - should validate we stay within the hw bandwidth limits
   3901 	 */
   3902 	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
   3903 		ddb->enabled_slices = 2;
   3904 	} else {
   3905 		ddb->enabled_slices = 1;
   3906 		ddb_size /= 2;
   3907 	}
   3908 
   3909 	return ddb_size;
   3910 }
   3911 
   3912 static void
   3913 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
   3914 				   const struct intel_crtc_state *crtc_state,
   3915 				   const u64 total_data_rate,
   3916 				   struct skl_ddb_allocation *ddb,
   3917 				   struct skl_ddb_entry *alloc, /* out */
   3918 				   int *num_active /* out */)
   3919 {
   3920 	struct drm_atomic_state *state = crtc_state->uapi.state;
   3921 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
   3922 	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
   3923 	const struct intel_crtc *crtc;
   3924 	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
   3925 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
   3926 	u16 ddb_size;
   3927 	u32 i;
   3928 
   3929 	if (WARN_ON(!state) || !crtc_state->hw.active) {
   3930 		alloc->start = 0;
   3931 		alloc->end = 0;
   3932 		*num_active = hweight8(dev_priv->active_pipes);
   3933 		return;
   3934 	}
   3935 
   3936 	if (intel_state->active_pipe_changes)
   3937 		*num_active = hweight8(intel_state->active_pipes);
   3938 	else
   3939 		*num_active = hweight8(dev_priv->active_pipes);
   3940 
   3941 	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
   3942 				      *num_active, ddb);
   3943 
   3944 	/*
    3945 	 * If the state doesn't change the active CRTCs or there is no
    3946 	 * modeset request, then there's no need to recalculate;
    3947 	 * the existing pipe allocation limits should remain unchanged.
    3948 	 * Note that we're safe from racing commits since any racing commit
    3949 	 * that changes the active CRTC list or does a modeset would need to
   3950 	 * grab _all_ crtc locks, including the one we currently hold.
   3951 	 */
   3952 	if (!intel_state->active_pipe_changes && !intel_state->modeset) {
   3953 		/*
   3954 		 * alloc may be cleared by clear_intel_crtc_state,
   3955 		 * copy from old state to be sure
   3956 		 */
   3957 		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
   3958 		return;
   3959 	}
   3960 
   3961 	/*
    3962 	 * The watermark/ddb requirement depends heavily on the width of the
    3963 	 * framebuffer, so instead of allocating DDB equally among pipes,
    3964 	 * distribute it based on the resolution/width of each display.
   3965 	 */
   3966 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
   3967 		const struct drm_display_mode *adjusted_mode =
   3968 			&crtc_state->hw.adjusted_mode;
   3969 		enum pipe pipe = crtc->pipe;
   3970 		int hdisplay, vdisplay;
   3971 
   3972 		if (!crtc_state->hw.enable)
   3973 			continue;
   3974 
   3975 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
   3976 		total_width += hdisplay;
   3977 
   3978 		if (pipe < for_pipe)
   3979 			width_before_pipe += hdisplay;
   3980 		else if (pipe == for_pipe)
   3981 			pipe_width = hdisplay;
   3982 	}
   3983 
   3984 	alloc->start = ddb_size * width_before_pipe / total_width;
   3985 	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
   3986 }
   3987 
   3988 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
   3989 				 int width, const struct drm_format_info *format,
   3990 				 u64 modifier, unsigned int rotation,
   3991 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
   3992 				 int color_plane);
   3993 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
   3994 				 int level,
   3995 				 const struct skl_wm_params *wp,
   3996 				 const struct skl_wm_level *result_prev,
   3997 				 struct skl_wm_level *result /* out */);
   3998 
   3999 static unsigned int
   4000 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
   4001 		      int num_active)
   4002 {
   4003 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4004 	int level, max_level = ilk_wm_max_level(dev_priv);
   4005 	struct skl_wm_level wm = {};
   4006 	int ret, min_ddb_alloc = 0;
   4007 	struct skl_wm_params wp;
   4008 
   4009 	ret = skl_compute_wm_params(crtc_state, 256,
   4010 				    drm_format_info(DRM_FORMAT_ARGB8888),
   4011 				    DRM_FORMAT_MOD_LINEAR,
   4012 				    DRM_MODE_ROTATE_0,
   4013 				    crtc_state->pixel_rate, &wp, 0);
   4014 	WARN_ON(ret);
   4015 
   4016 	for (level = 0; level <= max_level; level++) {
   4017 		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
   4018 		if (wm.min_ddb_alloc == U16_MAX)
   4019 			break;
   4020 
   4021 		min_ddb_alloc = wm.min_ddb_alloc;
   4022 	}
   4023 
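         	/*
         	 * Fall back to a fixed minimum cursor allocation: 32 blocks with
         	 * a single active pipe, 8 otherwise.
         	 */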
   4024 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
   4025 }
   4026 
   4027 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
   4028 				       struct skl_ddb_entry *entry, u32 reg)
   4029 {
   4031 	entry->start = reg & DDB_ENTRY_MASK;
   4032 	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
   4033 
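         	/*
         	 * The hardware field holds an inclusive end; these entries use
         	 * an exclusive end, hence the +1 for non-empty entries.
         	 */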
   4034 	if (entry->end)
   4035 		entry->end += 1;
   4036 }
   4037 
   4038 static void
   4039 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
   4040 			   const enum pipe pipe,
   4041 			   const enum plane_id plane_id,
   4042 			   struct skl_ddb_entry *ddb_y,
   4043 			   struct skl_ddb_entry *ddb_uv)
   4044 {
   4045 	u32 val, val2;
   4046 	u32 fourcc = 0;
   4047 
   4048 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
   4049 	if (plane_id == PLANE_CURSOR) {
   4050 		val = I915_READ(CUR_BUF_CFG(pipe));
   4051 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
   4052 		return;
   4053 	}
   4054 
   4055 	val = I915_READ(PLANE_CTL(pipe, plane_id));
   4056 
   4057 	/* No DDB allocated for disabled planes */
   4058 	if (val & PLANE_CTL_ENABLE)
   4059 		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
   4060 					      val & PLANE_CTL_ORDER_RGBX,
   4061 					      val & PLANE_CTL_ALPHA_MASK);
   4062 
   4063 	if (INTEL_GEN(dev_priv) >= 11) {
   4064 		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
   4065 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
   4066 	} else {
   4067 		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
   4068 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
   4069 
   4070 		if (fourcc &&
   4071 		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
   4072 			swap(val, val2);
   4073 
   4074 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
   4075 		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
   4076 	}
   4077 }
   4078 
   4079 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
   4080 			       struct skl_ddb_entry *ddb_y,
   4081 			       struct skl_ddb_entry *ddb_uv)
   4082 {
   4083 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4084 	enum intel_display_power_domain power_domain;
   4085 	enum pipe pipe = crtc->pipe;
   4086 	intel_wakeref_t wakeref;
   4087 	enum plane_id plane_id;
   4088 
   4089 	power_domain = POWER_DOMAIN_PIPE(pipe);
   4090 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
   4091 	if (!wakeref)
   4092 		return;
   4093 
   4094 	for_each_plane_id_on_crtc(crtc, plane_id)
   4095 		skl_ddb_get_hw_plane_state(dev_priv, pipe,
   4096 					   plane_id,
   4097 					   &ddb_y[plane_id],
   4098 					   &ddb_uv[plane_id]);
   4099 
   4100 	intel_display_power_put(dev_priv, power_domain, wakeref);
   4101 }
   4102 
   4103 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
   4104 			  struct skl_ddb_allocation *ddb /* out */)
   4105 {
   4106 	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
   4107 }
   4108 
   4109 /*
   4110  * Determines the downscale amount of a plane for the purposes of watermark calculations.
   4111  * The bspec defines downscale amount as:
   4112  *
   4113  * """
   4114  * Horizontal down scale amount = maximum[1, Horizontal source size /
   4115  *                                           Horizontal destination size]
   4116  * Vertical down scale amount = maximum[1, Vertical source size /
   4117  *                                         Vertical destination size]
   4118  * Total down scale amount = Horizontal down scale amount *
   4119  *                           Vertical down scale amount
   4120  * """
   4121  *
   4122  * Return value is provided in 16.16 fixed point form to retain fractional part.
   4123  * Caller should take care of dividing & rounding off the value.
   4124  */
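         /*
          * Illustrative example: a 3840x2160 source scaled down to a 1920x1080
          * destination gives a 2.0 ratio in each direction, i.e. a total down
          * scale amount of 4.0 (0x00040000 in 16.16 form).
          */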
   4125 static uint_fixed_16_16_t
   4126 skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
   4127 			   const struct intel_plane_state *plane_state)
   4128 {
   4129 	u32 src_w, src_h, dst_w, dst_h;
   4130 	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
   4131 	uint_fixed_16_16_t downscale_h, downscale_w;
   4132 
   4133 	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
   4134 		return u32_to_fixed16(0);
   4135 
   4136 	/*
   4137 	 * Src coordinates are already rotated by 270 degrees for
   4138 	 * the 90/270 degree plane rotation cases (to match the
   4139 	 * GTT mapping), hence no need to account for rotation here.
   4140 	 *
   4141 	 * n.b., src is 16.16 fixed point, dst is whole integer.
   4142 	 */
   4143 	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
   4144 	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
   4145 	dst_w = drm_rect_width(&plane_state->uapi.dst);
   4146 	dst_h = drm_rect_height(&plane_state->uapi.dst);
   4147 
   4148 	fp_w_ratio = div_fixed16(src_w, dst_w);
   4149 	fp_h_ratio = div_fixed16(src_h, dst_h);
   4150 	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
   4151 	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
   4152 
   4153 	return mul_fixed16(downscale_w, downscale_h);
   4154 }
   4155 
   4156 static u64
   4157 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
   4158 			     const struct intel_plane_state *plane_state,
   4159 			     int color_plane)
   4160 {
   4161 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   4162 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4163 	u32 data_rate;
   4164 	u32 width = 0, height = 0;
   4165 	uint_fixed_16_16_t down_scale_amount;
   4166 	u64 rate;
   4167 
   4168 	if (!plane_state->uapi.visible)
   4169 		return 0;
   4170 
   4171 	if (plane->id == PLANE_CURSOR)
   4172 		return 0;
   4173 
   4174 	if (color_plane == 1 &&
   4175 	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
   4176 		return 0;
   4177 
   4178 	/*
   4179 	 * Src coordinates are already rotated by 270 degrees for
   4180 	 * the 90/270 degree plane rotation cases (to match the
   4181 	 * GTT mapping), hence no need to account for rotation here.
   4182 	 */
   4183 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
   4184 	height = drm_rect_height(&plane_state->uapi.src) >> 16;
   4185 
   4186 	/* UV plane does 1/2 pixel sub-sampling */
   4187 	if (color_plane == 1) {
   4188 		width /= 2;
   4189 		height /= 2;
   4190 	}
   4191 
   4192 	data_rate = width * height;
   4193 
   4194 	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
   4195 
   4196 	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
   4197 
   4198 	rate *= fb->format->cpp[color_plane];
   4199 	return rate;
   4200 }
   4201 
   4202 static u64
   4203 skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
   4204 				 u64 *plane_data_rate,
   4205 				 u64 *uv_plane_data_rate)
   4206 {
   4207 	struct drm_atomic_state *state = crtc_state->uapi.state;
   4208 	struct intel_plane *plane;
   4209 	const struct intel_plane_state *plane_state;
   4210 	u64 total_data_rate = 0;
   4211 
   4212 	if (WARN_ON(!state))
   4213 		return 0;
   4214 
   4215 	/* Calculate and cache data rate for each plane */
   4216 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
   4217 		enum plane_id plane_id = plane->id;
   4218 		u64 rate;
   4219 
   4220 		/* packed/y */
   4221 		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
   4222 		plane_data_rate[plane_id] = rate;
   4223 		total_data_rate += rate;
   4224 
   4225 		/* uv-plane */
   4226 		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
   4227 		uv_plane_data_rate[plane_id] = rate;
   4228 		total_data_rate += rate;
   4229 	}
   4230 
   4231 	return total_data_rate;
   4232 }
   4233 
   4234 static u64
   4235 icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
   4236 				 u64 *plane_data_rate)
   4237 {
   4238 	struct intel_plane *plane;
   4239 	const struct intel_plane_state *plane_state;
   4240 	u64 total_data_rate = 0;
   4241 
   4242 	if (WARN_ON(!crtc_state->uapi.state))
   4243 		return 0;
   4244 
   4245 	/* Calculate and cache data rate for each plane */
   4246 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
   4247 		enum plane_id plane_id = plane->id;
   4248 		u64 rate;
   4249 
   4250 		if (!plane_state->planar_linked_plane) {
   4251 			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
   4252 			plane_data_rate[plane_id] = rate;
   4253 			total_data_rate += rate;
   4254 		} else {
   4255 			enum plane_id y_plane_id;
   4256 
    4257 			 * The slave plane might not be iterated by
    4258 			 * intel_atomic_crtc_state_for_each_plane_state(),
    4259 			 * and it needs the master plane state, which may be
    4260 			 * NULL if we try get_new_plane_state(), so we
   4261 			 * NULL if we try get_new_plane_state(), so we
   4262 			 * always calculate from the master.
   4263 			 */
   4264 			if (plane_state->planar_slave)
   4265 				continue;
   4266 
   4267 			/* Y plane rate is calculated on the slave */
   4268 			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
   4269 			y_plane_id = plane_state->planar_linked_plane->id;
   4270 			plane_data_rate[y_plane_id] = rate;
   4271 			total_data_rate += rate;
   4272 
   4273 			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
   4274 			plane_data_rate[plane_id] = rate;
   4275 			total_data_rate += rate;
   4276 		}
   4277 	}
   4278 
   4279 	return total_data_rate;
   4280 }
   4281 
   4282 static int
   4283 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
   4284 		      struct skl_ddb_allocation *ddb /* out */)
   4285 {
   4286 	struct drm_atomic_state *state = crtc_state->uapi.state;
   4287 	struct drm_crtc *crtc = crtc_state->uapi.crtc;
   4288 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
   4289 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
   4290 	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
   4291 	u16 alloc_size, start = 0;
   4292 	u16 total[I915_MAX_PLANES] = {};
   4293 	u16 uv_total[I915_MAX_PLANES] = {};
   4294 	u64 total_data_rate;
   4295 	enum plane_id plane_id;
   4296 	int num_active;
   4297 	u64 plane_data_rate[I915_MAX_PLANES] = {};
   4298 	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
   4299 	u32 blocks;
   4300 	int level;
   4301 
   4302 	/* Clear the partitioning for disabled planes. */
   4303 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
   4304 	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
   4305 
   4306 	if (WARN_ON(!state))
   4307 		return 0;
   4308 
   4309 	if (!crtc_state->hw.active) {
   4310 		alloc->start = alloc->end = 0;
   4311 		return 0;
   4312 	}
   4313 
   4314 	if (INTEL_GEN(dev_priv) >= 11)
   4315 		total_data_rate =
   4316 			icl_get_total_relative_data_rate(crtc_state,
   4317 							 plane_data_rate);
   4318 	else
   4319 		total_data_rate =
   4320 			skl_get_total_relative_data_rate(crtc_state,
   4321 							 plane_data_rate,
   4322 							 uv_plane_data_rate);
    4323 
   4325 	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
   4326 					   ddb, alloc, &num_active);
   4327 	alloc_size = skl_ddb_entry_size(alloc);
   4328 	if (alloc_size == 0)
   4329 		return 0;
   4330 
    4331 	/* Allocate a fixed number of blocks for the cursor. */
   4332 	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
   4333 	alloc_size -= total[PLANE_CURSOR];
   4334 	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
   4335 		alloc->end - total[PLANE_CURSOR];
   4336 	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
   4337 
   4338 	if (total_data_rate == 0)
   4339 		return 0;
   4340 
   4341 	/*
   4342 	 * Find the highest watermark level for which we can satisfy the block
   4343 	 * requirement of active planes.
   4344 	 */
   4345 	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
   4346 		blocks = 0;
   4347 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
   4348 			const struct skl_plane_wm *wm =
   4349 				&crtc_state->wm.skl.optimal.planes[plane_id];
   4350 
   4351 			if (plane_id == PLANE_CURSOR) {
   4352 				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
   4353 					WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
   4354 					blocks = U32_MAX;
   4355 					break;
   4356 				}
   4357 				continue;
   4358 			}
   4359 
   4360 			blocks += wm->wm[level].min_ddb_alloc;
   4361 			blocks += wm->uv_wm[level].min_ddb_alloc;
   4362 		}
   4363 
   4364 		if (blocks <= alloc_size) {
   4365 			alloc_size -= blocks;
   4366 			break;
   4367 		}
   4368 	}
   4369 
   4370 	if (level < 0) {
   4371 		drm_dbg_kms(&dev_priv->drm,
    4372 			    "Requested display configuration exceeds system DDB limitations\n");
   4373 		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
   4374 			    blocks, alloc_size);
   4375 		return -EINVAL;
   4376 	}
   4377 
   4378 	/*
   4379 	 * Grant each plane the blocks it requires at the highest achievable
   4380 	 * watermark level, plus an extra share of the leftover blocks
   4381 	 * proportional to its relative data rate.
   4382 	 */
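         	/*
         	 * Illustrative example: with 100 leftover blocks and two planes
         	 * carrying 75% and 25% of the remaining data rate, the loop below
         	 * hands them 75 and 25 extra blocks on top of their minimums.
         	 */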
   4383 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
   4384 		const struct skl_plane_wm *wm =
   4385 			&crtc_state->wm.skl.optimal.planes[plane_id];
   4386 		u64 rate;
   4387 		u16 extra;
   4388 
   4389 		if (plane_id == PLANE_CURSOR)
   4390 			continue;
   4391 
   4392 		/*
   4393 		 * We've accounted for all active planes; remaining planes are
   4394 		 * all disabled.
   4395 		 */
   4396 		if (total_data_rate == 0)
   4397 			break;
   4398 
   4399 		rate = plane_data_rate[plane_id];
   4400 		extra = min_t(u16, alloc_size,
   4401 			      DIV64_U64_ROUND_UP(alloc_size * rate,
   4402 						 total_data_rate));
   4403 		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
   4404 		alloc_size -= extra;
   4405 		total_data_rate -= rate;
   4406 
   4407 		if (total_data_rate == 0)
   4408 			break;
   4409 
   4410 		rate = uv_plane_data_rate[plane_id];
   4411 		extra = min_t(u16, alloc_size,
   4412 			      DIV64_U64_ROUND_UP(alloc_size * rate,
   4413 						 total_data_rate));
   4414 		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
   4415 		alloc_size -= extra;
   4416 		total_data_rate -= rate;
   4417 	}
   4418 	WARN_ON(alloc_size != 0 || total_data_rate != 0);
   4419 
   4420 	/* Set the actual DDB start/end points for each plane */
   4421 	start = alloc->start;
   4422 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
   4423 		struct skl_ddb_entry *plane_alloc =
   4424 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
   4425 		struct skl_ddb_entry *uv_plane_alloc =
   4426 			&crtc_state->wm.skl.plane_ddb_uv[plane_id];
   4427 
   4428 		if (plane_id == PLANE_CURSOR)
   4429 			continue;
   4430 
   4431 		/* Gen11+ uses a separate plane for UV watermarks */
   4432 		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
   4433 
   4434 		/* Leave disabled planes at (0,0) */
   4435 		if (total[plane_id]) {
   4436 			plane_alloc->start = start;
   4437 			start += total[plane_id];
   4438 			plane_alloc->end = start;
   4439 		}
   4440 
   4441 		if (uv_total[plane_id]) {
   4442 			uv_plane_alloc->start = start;
   4443 			start += uv_total[plane_id];
   4444 			uv_plane_alloc->end = start;
   4445 		}
   4446 	}
   4447 
   4448 	/*
   4449 	 * When we calculated watermark values we didn't know how high
   4450 	 * of a level we'd actually be able to hit, so we just marked
   4451 	 * all levels as "enabled."  Go back now and disable the ones
   4452 	 * that aren't actually possible.
   4453 	 */
   4454 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
   4455 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
   4456 			struct skl_plane_wm *wm =
   4457 				&crtc_state->wm.skl.optimal.planes[plane_id];
   4458 
   4459 			/*
   4460 			 * We only disable the watermarks for each plane if
   4461 			 * they exceed the ddb allocation of said plane. This
   4462 			 * is done so that we don't end up touching cursor
   4463 			 * watermarks needlessly when some other plane reduces
   4464 			 * our max possible watermark level.
   4465 			 *
   4466 			 * Bspec has this to say about the PLANE_WM enable bit:
   4467 			 * "All the watermarks at this level for all enabled
   4468 			 *  planes must be enabled before the level will be used."
   4469 			 * So this is actually safe to do.
   4470 			 */
   4471 			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
   4472 			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
   4473 				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
   4474 
   4475 			/*
   4476 			 * Wa_1408961008:icl, ehl
   4477 			 * Underruns with WM1+ disabled
   4478 			 */
   4479 			if (IS_GEN(dev_priv, 11) &&
   4480 			    level == 1 && wm->wm[0].plane_en) {
   4481 				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
   4482 				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
   4483 				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
   4484 			}
   4485 		}
   4486 	}
   4487 
   4488 	/*
   4489 	 * Go back and disable the transition watermark if it turns out we
   4490 	 * don't have enough DDB blocks for it.
   4491 	 */
   4492 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
   4493 		struct skl_plane_wm *wm =
   4494 			&crtc_state->wm.skl.optimal.planes[plane_id];
   4495 
   4496 		if (wm->trans_wm.plane_res_b >= total[plane_id])
   4497 			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
   4498 	}
   4499 
   4500 	return 0;
   4501 }
   4502 
    4503 /*
    4504  * The max latency should be 257 (the max the punit can encode is 255, and we
    4505  * add 2us for the read latency), and cpp should always be <= 8, so that
    4506  * should allow pixel_rate up to ~2 GHz, which seems sufficient since the max
    4507  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
    4508  */
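         /*
          * Worked bound (illustrative, assuming pixel_rate is in kHz): latency
          * 257 * pixel_rate 2,000,000 * cpp 8 = ~4.11e9, which still fits in
          * the u32 wm_intermediate_val below (U32_MAX is ~4.29e9).
          */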
   4509 static uint_fixed_16_16_t
   4510 skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
   4511 	       u8 cpp, u32 latency, u32 dbuf_block_size)
   4512 {
   4513 	u32 wm_intermediate_val;
   4514 	uint_fixed_16_16_t ret;
   4515 
   4516 	if (latency == 0)
   4517 		return FP_16_16_MAX;
   4518 
   4519 	wm_intermediate_val = latency * pixel_rate * cpp;
   4520 	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
   4521 
   4522 	if (INTEL_GEN(dev_priv) >= 10)
   4523 		ret = add_fixed16_u32(ret, 1);
   4524 
   4525 	return ret;
   4526 }
   4527 
   4528 static uint_fixed_16_16_t
   4529 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
   4530 	       uint_fixed_16_16_t plane_blocks_per_line)
   4531 {
   4532 	u32 wm_intermediate_val;
   4533 	uint_fixed_16_16_t ret;
   4534 
   4535 	if (latency == 0)
   4536 		return FP_16_16_MAX;
   4537 
   4538 	wm_intermediate_val = latency * pixel_rate;
   4539 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
   4540 					   pipe_htotal * 1000);
   4541 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
   4542 	return ret;
   4543 }
   4544 
   4545 static uint_fixed_16_16_t
   4546 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
   4547 {
   4548 	u32 pixel_rate;
   4549 	u32 crtc_htotal;
   4550 	uint_fixed_16_16_t linetime_us;
   4551 
   4552 	if (!crtc_state->hw.active)
   4553 		return u32_to_fixed16(0);
   4554 
   4555 	pixel_rate = crtc_state->pixel_rate;
   4556 
   4557 	if (WARN_ON(pixel_rate == 0))
   4558 		return u32_to_fixed16(0);
   4559 
   4560 	crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
   4561 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
   4562 
   4563 	return linetime_us;
   4564 }
   4565 
   4566 static u32
   4567 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
   4568 			      const struct intel_plane_state *plane_state)
   4569 {
   4570 	u64 adjusted_pixel_rate;
   4571 	uint_fixed_16_16_t downscale_amount;
   4572 
   4573 	/* Shouldn't reach here on disabled planes... */
   4574 	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
   4575 		return 0;
   4576 
   4577 	/*
   4578 	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
   4579 	 * with additional adjustments for plane-specific scaling.
   4580 	 */
   4581 	adjusted_pixel_rate = crtc_state->pixel_rate;
   4582 	downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
   4583 
   4584 	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
    4585 					downscale_amount);
   4586 }
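         /*
          * Example (hypothetical plane): downscaling 2x both horizontally
          * and vertically makes skl_plane_downscale_amount() return 4.0 in
          * 16.16 fixed point, so a 148500 kHz pipe rate becomes 594000 kHz
          * for the plane -- it must fetch four source pixels per output
          * pixel.
          */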
   4587 
   4588 static int
   4589 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
   4590 		      int width, const struct drm_format_info *format,
   4591 		      u64 modifier, unsigned int rotation,
   4592 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
   4593 		      int color_plane)
   4594 {
   4595 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4596 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4597 	u32 interm_pbpl;
   4598 
    4599 	/* only planar formats have two planes */
    4600 	if (color_plane == 1 &&
    4601 	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
    4602 		drm_dbg_kms(&dev_priv->drm,
    4603 			    "Non-planar formats have a single plane\n");
   4604 		return -EINVAL;
   4605 	}
   4606 
   4607 	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
   4608 		      modifier == I915_FORMAT_MOD_Yf_TILED ||
   4609 		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
   4610 		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
   4611 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
   4612 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
   4613 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
   4614 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
   4615 
   4616 	wp->width = width;
   4617 	if (color_plane == 1 && wp->is_planar)
   4618 		wp->width /= 2;
   4619 
   4620 	wp->cpp = format->cpp[color_plane];
   4621 	wp->plane_pixel_rate = plane_pixel_rate;
   4622 
   4623 	if (INTEL_GEN(dev_priv) >= 11 &&
    4624 	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
   4625 		wp->dbuf_block_size = 256;
   4626 	else
   4627 		wp->dbuf_block_size = 512;
   4628 
   4629 	if (drm_rotation_90_or_270(rotation)) {
   4630 		switch (wp->cpp) {
   4631 		case 1:
   4632 			wp->y_min_scanlines = 16;
   4633 			break;
   4634 		case 2:
   4635 			wp->y_min_scanlines = 8;
   4636 			break;
   4637 		case 4:
   4638 			wp->y_min_scanlines = 4;
   4639 			break;
   4640 		default:
   4641 			MISSING_CASE(wp->cpp);
   4642 			return -EINVAL;
   4643 		}
   4644 	} else {
   4645 		wp->y_min_scanlines = 4;
   4646 	}
   4647 
   4648 	if (skl_needs_memory_bw_wa(dev_priv))
   4649 		wp->y_min_scanlines *= 2;
   4650 
   4651 	wp->plane_bytes_per_line = wp->width * wp->cpp;
   4652 	if (wp->y_tiled) {
   4653 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
   4654 					   wp->y_min_scanlines,
   4655 					   wp->dbuf_block_size);
   4656 
   4657 		if (INTEL_GEN(dev_priv) >= 10)
   4658 			interm_pbpl++;
   4659 
   4660 		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
   4661 							wp->y_min_scanlines);
   4662 	} else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
   4663 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
   4664 					   wp->dbuf_block_size);
   4665 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
   4666 	} else {
   4667 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
   4668 					   wp->dbuf_block_size) + 1;
   4669 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
   4670 	}
   4671 
   4672 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
   4673 					     wp->plane_blocks_per_line);
   4674 
   4675 	wp->linetime_us = fixed16_to_u32_round_up(
   4676 					intel_get_linetime_us(crtc_state));
   4677 
   4678 	return 0;
   4679 }
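         /*
          * Worked example for the Y-tiled branch above (illustrative
          * numbers): a 3840-wide plane with cpp = 4 has
          * plane_bytes_per_line = 15360.  With y_min_scanlines = 4 and
          * dbuf_block_size = 512,
          *
          *   interm_pbpl = DIV_ROUND_UP(15360 * 4, 512) = 120  (121 on gen10+)
          *   plane_blocks_per_line = 121 / 4 = 30.25           (16.16 fixed)
          *   y_tile_minimum = 4 * 30.25 = 121 blocks
          */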
   4680 
   4681 static int
   4682 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
   4683 			    const struct intel_plane_state *plane_state,
   4684 			    struct skl_wm_params *wp, int color_plane)
   4685 {
   4686 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4687 	int width;
   4688 
   4689 	/*
   4690 	 * Src coordinates are already rotated by 270 degrees for
   4691 	 * the 90/270 degree plane rotation cases (to match the
   4692 	 * GTT mapping), hence no need to account for rotation here.
   4693 	 */
   4694 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
   4695 
   4696 	return skl_compute_wm_params(crtc_state, width,
   4697 				     fb->format, fb->modifier,
   4698 				     plane_state->hw.rotation,
   4699 				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
   4700 				     wp, color_plane);
   4701 }
   4702 
   4703 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
   4704 {
   4705 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
   4706 		return true;
   4707 
    4708 	/* The number of lines is ignored for the level 0 watermark. */
   4709 	return level > 0;
   4710 }
   4711 
   4712 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
   4713 				 int level,
   4714 				 const struct skl_wm_params *wp,
   4715 				 const struct skl_wm_level *result_prev,
   4716 				 struct skl_wm_level *result /* out */)
   4717 {
   4718 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4719 	u32 latency = dev_priv->wm.skl_latency[level];
   4720 	uint_fixed_16_16_t method1, method2;
   4721 	uint_fixed_16_16_t selected_result;
   4722 	u32 res_blocks, res_lines, min_ddb_alloc = 0;
   4723 
   4724 	if (latency == 0) {
   4725 		/* reject it */
   4726 		result->min_ddb_alloc = U16_MAX;
   4727 		return;
   4728 	}
   4729 
   4730 	/*
   4731 	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
   4732 	 * Display WA #1141: kbl,cfl
   4733 	 */
    4734 	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
    4735 	    dev_priv->ipc_enabled)
   4736 		latency += 4;
   4737 
   4738 	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
   4739 		latency += 15;
   4740 
   4741 	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
   4742 				 wp->cpp, latency, wp->dbuf_block_size);
   4743 	method2 = skl_wm_method2(wp->plane_pixel_rate,
   4744 				 crtc_state->hw.adjusted_mode.crtc_htotal,
   4745 				 latency,
   4746 				 wp->plane_blocks_per_line);
   4747 
   4748 	if (wp->y_tiled) {
   4749 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
   4750 	} else {
   4751 		if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
   4752 		     wp->dbuf_block_size < 1) &&
   4753 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
   4754 			selected_result = method2;
   4755 		} else if (latency >= wp->linetime_us) {
   4756 			if (IS_GEN(dev_priv, 9) &&
   4757 			    !IS_GEMINILAKE(dev_priv))
   4758 				selected_result = min_fixed16(method1, method2);
   4759 			else
   4760 				selected_result = method2;
   4761 		} else {
   4762 			selected_result = method1;
   4763 		}
   4764 	}
   4765 
   4766 	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
   4767 	res_lines = div_round_up_fixed16(selected_result,
   4768 					 wp->plane_blocks_per_line);
   4769 
   4770 	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
   4771 		/* Display WA #1125: skl,bxt,kbl */
   4772 		if (level == 0 && wp->rc_surface)
   4773 			res_blocks +=
   4774 				fixed16_to_u32_round_up(wp->y_tile_minimum);
   4775 
   4776 		/* Display WA #1126: skl,bxt,kbl */
   4777 		if (level >= 1 && level <= 7) {
   4778 			if (wp->y_tiled) {
   4779 				res_blocks +=
   4780 				    fixed16_to_u32_round_up(wp->y_tile_minimum);
   4781 				res_lines += wp->y_min_scanlines;
   4782 			} else {
   4783 				res_blocks++;
   4784 			}
   4785 
   4786 			/*
   4787 			 * Make sure result blocks for higher latency levels are
    4788 			 * at least as high as the level below the current level.
   4789 			 * Assumption in DDB algorithm optimization for special
   4790 			 * cases. Also covers Display WA #1125 for RC.
   4791 			 */
   4792 			if (result_prev->plane_res_b > res_blocks)
   4793 				res_blocks = result_prev->plane_res_b;
   4794 		}
   4795 	}
   4796 
   4797 	if (INTEL_GEN(dev_priv) >= 11) {
   4798 		if (wp->y_tiled) {
   4799 			int extra_lines;
   4800 
   4801 			if (res_lines % wp->y_min_scanlines == 0)
   4802 				extra_lines = wp->y_min_scanlines;
   4803 			else
   4804 				extra_lines = wp->y_min_scanlines * 2 -
   4805 					res_lines % wp->y_min_scanlines;
   4806 
   4807 			min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
   4808 								 wp->plane_blocks_per_line);
   4809 		} else {
   4810 			min_ddb_alloc = res_blocks +
   4811 				DIV_ROUND_UP(res_blocks, 10);
   4812 		}
   4813 	}
   4814 
   4815 	if (!skl_wm_has_lines(dev_priv, level))
   4816 		res_lines = 0;
   4817 
   4818 	if (res_lines > 31) {
   4819 		/* reject it */
   4820 		result->min_ddb_alloc = U16_MAX;
   4821 		return;
   4822 	}
   4823 
   4824 	/*
   4825 	 * If res_lines is valid, assume we can use this watermark level
   4826 	 * for now.  We'll come back and disable it after we calculate the
   4827 	 * DDB allocation if it turns out we don't actually have enough
   4828 	 * blocks to satisfy it.
   4829 	 */
   4830 	result->plane_res_b = res_blocks;
   4831 	result->plane_res_l = res_lines;
   4832 	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
   4833 	result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
   4834 	result->plane_en = true;
   4835 }
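         /*
          * Example of the gen11+ min_ddb_alloc rounding above (illustrative
          * numbers): a Y-tiled plane with res_lines = 10 and
          * y_min_scanlines = 4 gets extra_lines = 2 * 4 - 10 % 4 = 6, i.e.
          * the allocation is sized for 16 lines -- res_lines rounded up to a
          * tile boundary plus one more tile row.  A linear plane with
          * res_blocks = 45 would instead get
          * min_ddb_alloc = 45 + DIV_ROUND_UP(45, 10) = 50 blocks.
          */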
   4836 
   4837 static void
   4838 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
   4839 		      const struct skl_wm_params *wm_params,
   4840 		      struct skl_wm_level *levels)
   4841 {
   4842 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   4843 	int level, max_level = ilk_wm_max_level(dev_priv);
   4844 	struct skl_wm_level *result_prev = &levels[0];
   4845 
   4846 	for (level = 0; level <= max_level; level++) {
   4847 		struct skl_wm_level *result = &levels[level];
   4848 
   4849 		skl_compute_plane_wm(crtc_state, level, wm_params,
   4850 				     result_prev, result);
   4851 
   4852 		result_prev = result;
   4853 	}
   4854 }
   4855 
   4856 static u32
   4857 skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
   4858 {
   4859 	struct drm_atomic_state *state = crtc_state->uapi.state;
   4860 	struct drm_i915_private *dev_priv = to_i915(state->dev);
   4861 	uint_fixed_16_16_t linetime_us;
   4862 	u32 linetime_wm;
   4863 
   4864 	linetime_us = intel_get_linetime_us(crtc_state);
   4865 	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
   4866 
   4867 	/* Display WA #1135: BXT:ALL GLK:ALL */
   4868 	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
   4869 		linetime_wm /= 2;
   4870 
   4871 	return linetime_wm;
   4872 }
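         /*
          * Example: with linetime_us ~ 14.8 us (the 1080p@60Hz case above)
          * the linetime watermark is fixed16_to_u32_round_up(8 * 14.8) = 119,
          * halved to 59 on BXT/GLK when IPC is enabled (illustrative
          * numbers).
          */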
   4873 
   4874 static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
   4875 				      const struct skl_wm_params *wp,
   4876 				      struct skl_plane_wm *wm)
   4877 {
   4878 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
   4879 	const struct drm_i915_private *dev_priv = to_i915(dev);
   4880 	u16 trans_min, trans_y_tile_min;
    4881 	const u16 trans_amount = 10; /* This is a configurable amount */
   4882 	u16 wm0_sel_res_b, trans_offset_b, res_blocks;
   4883 
    4884 	/* Transition WMs are not recommended by the HW team for GEN9 */
   4885 	if (INTEL_GEN(dev_priv) <= 9)
   4886 		return;
   4887 
    4888 	/* Transition WMs don't make any sense if IPC is disabled */
   4889 	if (!dev_priv->ipc_enabled)
   4890 		return;
   4891 
   4892 	trans_min = 14;
   4893 	if (INTEL_GEN(dev_priv) >= 11)
   4894 		trans_min = 4;
   4895 
   4896 	trans_offset_b = trans_min + trans_amount;
   4897 
   4898 	/*
   4899 	 * The spec asks for Selected Result Blocks for wm0 (the real value),
   4900 	 * not Result Blocks (the integer value). Pay attention to the capital
    4901 	 * letters. The value wm->wm[0].plane_res_b is actually Result Blocks, but
   4902 	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
   4903 	 * and since we later will have to get the ceiling of the sum in the
   4904 	 * transition watermarks calculation, we can just pretend Selected
   4905 	 * Result Blocks is Result Blocks minus 1 and it should work for the
   4906 	 * current platforms.
   4907 	 */
   4908 	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
   4909 
   4910 	if (wp->y_tiled) {
   4911 		trans_y_tile_min =
   4912 			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
   4913 		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
   4914 				trans_offset_b;
   4915 	} else {
   4916 		res_blocks = wm0_sel_res_b + trans_offset_b;
   4917 
   4918 		/* WA BUG:1938466 add one block for non y-tile planes */
   4919 		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
   4920 			res_blocks += 1;
   4922 	}
   4923 
   4924 	/*
   4925 	 * Just assume we can enable the transition watermark.  After
   4926 	 * computing the DDB we'll come back and disable it if that
   4927 	 * assumption turns out to be false.
   4928 	 */
   4929 	wm->trans_wm.plane_res_b = res_blocks + 1;
   4930 	wm->trans_wm.plane_en = true;
   4931 }
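         /*
          * Worked example (illustrative numbers): on gen11 with IPC enabled,
          * trans_offset_b = 4 + 10 = 14.  For a linear plane with
          * wm->wm[0].plane_res_b = 46, wm0_sel_res_b = 45, so
          * res_blocks = 45 + 14 = 59 and the programmed transition watermark
          * is 59 + 1 = 60 blocks (assuming the CNL A0 workaround does not
          * apply).
          */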
   4932 
   4933 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
   4934 				     const struct intel_plane_state *plane_state,
   4935 				     enum plane_id plane_id, int color_plane)
   4936 {
   4937 	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
   4938 	struct skl_wm_params wm_params;
   4939 	int ret;
   4940 
   4941 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
   4942 					  &wm_params, color_plane);
   4943 	if (ret)
   4944 		return ret;
   4945 
   4946 	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
   4947 	skl_compute_transition_wm(crtc_state, &wm_params, wm);
   4948 
   4949 	return 0;
   4950 }
   4951 
   4952 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
   4953 				 const struct intel_plane_state *plane_state,
   4954 				 enum plane_id plane_id)
   4955 {
   4956 	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
   4957 	struct skl_wm_params wm_params;
   4958 	int ret;
   4959 
   4960 	wm->is_planar = true;
   4961 
   4962 	/* uv plane watermarks must also be validated for NV12/Planar */
   4963 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
   4964 					  &wm_params, 1);
   4965 	if (ret)
   4966 		return ret;
   4967 
   4968 	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
   4969 
   4970 	return 0;
   4971 }
   4972 
   4973 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
   4974 			      const struct intel_plane_state *plane_state)
   4975 {
   4976 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
   4977 	const struct drm_framebuffer *fb = plane_state->hw.fb;
   4978 	enum plane_id plane_id = plane->id;
   4979 	int ret;
   4980 
   4981 	if (!intel_wm_plane_visible(crtc_state, plane_state))
   4982 		return 0;
   4983 
   4984 	ret = skl_build_plane_wm_single(crtc_state, plane_state,
   4985 					plane_id, 0);
   4986 	if (ret)
   4987 		return ret;
   4988 
   4989 	if (fb->format->is_yuv && fb->format->num_planes > 1) {
   4990 		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
   4991 					    plane_id);
   4992 		if (ret)
   4993 			return ret;
   4994 	}
   4995 
   4996 	return 0;
   4997 }
   4998 
   4999 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
   5000 			      const struct intel_plane_state *plane_state)
   5001 {
   5002 	enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
   5003 	int ret;
   5004 
   5005 	/* Watermarks calculated in master */
   5006 	if (plane_state->planar_slave)
   5007 		return 0;
   5008 
   5009 	if (plane_state->planar_linked_plane) {
   5010 		const struct drm_framebuffer *fb = plane_state->hw.fb;
   5011 		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
   5012 
   5013 		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
   5014 		WARN_ON(!fb->format->is_yuv ||
   5015 			fb->format->num_planes == 1);
   5016 
   5017 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
   5018 						y_plane_id, 0);
   5019 		if (ret)
   5020 			return ret;
   5021 
   5022 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
   5023 						plane_id, 1);
   5024 		if (ret)
   5025 			return ret;
   5026 	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
   5027 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
   5028 						plane_id, 0);
   5029 		if (ret)
   5030 			return ret;
   5031 	}
   5032 
   5033 	return 0;
   5034 }
   5035 
   5036 static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
   5037 {
   5038 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   5039 	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
   5040 	struct intel_plane *plane;
   5041 	const struct intel_plane_state *plane_state;
   5042 	int ret;
   5043 
   5044 	/*
   5045 	 * We'll only calculate watermarks for planes that are actually
   5046 	 * enabled, so make sure all other planes are set as disabled.
   5047 	 */
   5048 	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
   5049 
   5050 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
   5051 						     crtc_state) {
   5053 		if (INTEL_GEN(dev_priv) >= 11)
   5054 			ret = icl_build_plane_wm(crtc_state, plane_state);
   5055 		else
   5056 			ret = skl_build_plane_wm(crtc_state, plane_state);
   5057 		if (ret)
   5058 			return ret;
   5059 	}
   5060 
   5061 	pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
   5062 
   5063 	return 0;
   5064 }
   5065 
   5066 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
   5067 				i915_reg_t reg,
   5068 				const struct skl_ddb_entry *entry)
   5069 {
   5070 	if (entry->end)
   5071 		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
   5072 	else
   5073 		I915_WRITE_FW(reg, 0);
   5074 }
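         /*
          * Example: a DDB entry covering blocks [0, 160) is written as
          * (160 - 1) << 16 | 0 -- the register field encodes an inclusive
          * end, hence the -1, and writing zero means "no allocation".
          */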
   5075 
   5076 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
   5077 			       i915_reg_t reg,
   5078 			       const struct skl_wm_level *level)
   5079 {
   5080 	u32 val = 0;
   5081 
   5082 	if (level->plane_en)
   5083 		val |= PLANE_WM_EN;
   5084 	if (level->ignore_lines)
   5085 		val |= PLANE_WM_IGNORE_LINES;
   5086 	val |= level->plane_res_b;
   5087 	val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
   5088 
   5089 	I915_WRITE_FW(reg, val);
   5090 }
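         /*
          * The watermark registers pack a level roughly as
          *
          *   PLANE_WM_EN | PLANE_WM_IGNORE_LINES |
          *   lines << PLANE_WM_LINES_SHIFT | blocks
          *
          * and skl_wm_level_from_reg_val() further below is the exact
          * inverse of this encoding.
          */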
   5091 
   5092 void skl_write_plane_wm(struct intel_plane *plane,
   5093 			const struct intel_crtc_state *crtc_state)
   5094 {
   5095 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   5096 	int level, max_level = ilk_wm_max_level(dev_priv);
   5097 	enum plane_id plane_id = plane->id;
   5098 	enum pipe pipe = plane->pipe;
   5099 	const struct skl_plane_wm *wm =
   5100 		&crtc_state->wm.skl.optimal.planes[plane_id];
   5101 	const struct skl_ddb_entry *ddb_y =
   5102 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
   5103 	const struct skl_ddb_entry *ddb_uv =
   5104 		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
   5105 
   5106 	for (level = 0; level <= max_level; level++) {
   5107 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
   5108 				   &wm->wm[level]);
   5109 	}
   5110 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
   5111 			   &wm->trans_wm);
   5112 
   5113 	if (INTEL_GEN(dev_priv) >= 11) {
   5114 		skl_ddb_entry_write(dev_priv,
   5115 				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
   5116 		return;
   5117 	}
   5118 
   5119 	if (wm->is_planar)
   5120 		swap(ddb_y, ddb_uv);
   5121 
   5122 	skl_ddb_entry_write(dev_priv,
   5123 			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
   5124 	skl_ddb_entry_write(dev_priv,
   5125 			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
   5126 }
   5127 
   5128 void skl_write_cursor_wm(struct intel_plane *plane,
   5129 			 const struct intel_crtc_state *crtc_state)
   5130 {
   5131 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   5132 	int level, max_level = ilk_wm_max_level(dev_priv);
   5133 	enum plane_id plane_id = plane->id;
   5134 	enum pipe pipe = plane->pipe;
   5135 	const struct skl_plane_wm *wm =
   5136 		&crtc_state->wm.skl.optimal.planes[plane_id];
   5137 	const struct skl_ddb_entry *ddb =
   5138 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
   5139 
   5140 	for (level = 0; level <= max_level; level++) {
   5141 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
   5142 				   &wm->wm[level]);
   5143 	}
   5144 	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
   5145 
   5146 	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
   5147 }
   5148 
   5149 bool skl_wm_level_equals(const struct skl_wm_level *l1,
   5150 			 const struct skl_wm_level *l2)
   5151 {
   5152 	return l1->plane_en == l2->plane_en &&
   5153 		l1->ignore_lines == l2->ignore_lines &&
   5154 		l1->plane_res_l == l2->plane_res_l &&
   5155 		l1->plane_res_b == l2->plane_res_b;
   5156 }
   5157 
   5158 static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
   5159 				const struct skl_plane_wm *wm1,
   5160 				const struct skl_plane_wm *wm2)
   5161 {
   5162 	int level, max_level = ilk_wm_max_level(dev_priv);
   5163 
   5164 	for (level = 0; level <= max_level; level++) {
   5165 		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
   5166 		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
   5167 			return false;
   5168 	}
   5169 
   5170 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
   5171 }
   5172 
   5173 static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
   5174 			       const struct skl_pipe_wm *wm1,
   5175 			       const struct skl_pipe_wm *wm2)
   5176 {
   5177 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5178 	enum plane_id plane_id;
   5179 
   5180 	for_each_plane_id_on_crtc(crtc, plane_id) {
   5181 		if (!skl_plane_wm_equals(dev_priv,
   5182 					 &wm1->planes[plane_id],
   5183 					 &wm2->planes[plane_id]))
   5184 			return false;
   5185 	}
   5186 
   5187 	return wm1->linetime == wm2->linetime;
   5188 }
   5189 
   5190 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
   5191 					   const struct skl_ddb_entry *b)
   5192 {
   5193 	return a->start < b->end && b->start < a->end;
   5194 }
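         /*
          * DDB entries are half-open [start, end) ranges, so e.g. [0, 160)
          * and [160, 320) do not overlap; two entries overlap iff each
          * starts before the other ends.
          */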
   5195 
   5196 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
   5197 				 const struct skl_ddb_entry *entries,
   5198 				 int num_entries, int ignore_idx)
   5199 {
   5200 	int i;
   5201 
   5202 	for (i = 0; i < num_entries; i++) {
   5203 		if (i != ignore_idx &&
   5204 		    skl_ddb_entries_overlap(ddb, &entries[i]))
   5205 			return true;
   5206 	}
   5207 
   5208 	return false;
   5209 }
   5210 
   5211 static int
   5212 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
   5213 			    struct intel_crtc_state *new_crtc_state)
   5214 {
   5215 	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
   5216 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   5217 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5218 	struct intel_plane *plane;
   5219 
   5220 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   5221 		struct intel_plane_state *plane_state;
   5222 		enum plane_id plane_id = plane->id;
   5223 
   5224 		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
   5225 					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
   5226 		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
   5227 					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
   5228 			continue;
   5229 
   5230 		plane_state = intel_atomic_get_plane_state(state, plane);
   5231 		if (IS_ERR(plane_state))
   5232 			return PTR_ERR(plane_state);
   5233 
   5234 		new_crtc_state->update_planes |= BIT(plane_id);
   5235 	}
   5236 
   5237 	return 0;
   5238 }
   5239 
   5240 static int
   5241 skl_compute_ddb(struct intel_atomic_state *state)
   5242 {
   5243 	const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   5244 	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
   5245 	struct intel_crtc_state *old_crtc_state;
   5246 	struct intel_crtc_state *new_crtc_state;
   5247 	struct intel_crtc *crtc;
   5248 	int ret, i;
   5249 
   5250 	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
   5251 
   5252 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   5253 					    new_crtc_state, i) {
   5254 		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
   5255 		if (ret)
   5256 			return ret;
   5257 
   5258 		ret = skl_ddb_add_affected_planes(old_crtc_state,
   5259 						  new_crtc_state);
   5260 		if (ret)
   5261 			return ret;
   5262 	}
   5263 
   5264 	return 0;
   5265 }
   5266 
   5267 static char enast(bool enable)
   5268 {
   5269 	return enable ? '*' : ' ';
   5270 }
   5271 
   5272 static void
   5273 skl_print_wm_changes(struct intel_atomic_state *state)
   5274 {
   5275 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   5276 	const struct intel_crtc_state *old_crtc_state;
   5277 	const struct intel_crtc_state *new_crtc_state;
   5278 	struct intel_plane *plane;
   5279 	struct intel_crtc *crtc;
   5280 	int i;
   5281 
   5282 	if (!drm_debug_enabled(DRM_UT_KMS))
   5283 		return;
   5284 
   5285 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   5286 					    new_crtc_state, i) {
   5287 		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
   5288 
   5289 		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
   5290 		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
   5291 
   5292 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   5293 			enum plane_id plane_id = plane->id;
   5294 			const struct skl_ddb_entry *old, *new;
   5295 
   5296 			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
   5297 			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
   5298 
   5299 			if (skl_ddb_entry_equal(old, new))
   5300 				continue;
   5301 
   5302 			drm_dbg_kms(&dev_priv->drm,
   5303 				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
   5304 				    plane->base.base.id, plane->base.name,
   5305 				    old->start, old->end, new->start, new->end,
   5306 				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
   5307 		}
   5308 
   5309 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   5310 			enum plane_id plane_id = plane->id;
   5311 			const struct skl_plane_wm *old_wm, *new_wm;
   5312 
   5313 			old_wm = &old_pipe_wm->planes[plane_id];
   5314 			new_wm = &new_pipe_wm->planes[plane_id];
   5315 
   5316 			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
   5317 				continue;
   5318 
   5319 			drm_dbg_kms(&dev_priv->drm,
   5320 				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
   5321 				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
   5322 				    plane->base.base.id, plane->base.name,
   5323 				    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
   5324 				    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
   5325 				    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
   5326 				    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
   5327 				    enast(old_wm->trans_wm.plane_en),
   5328 				    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
   5329 				    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
   5330 				    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
   5331 				    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
   5332 				    enast(new_wm->trans_wm.plane_en));
   5333 
   5334 			drm_dbg_kms(&dev_priv->drm,
   5335 				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
   5336 				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
   5337 				    plane->base.base.id, plane->base.name,
   5338 				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
   5339 				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
   5340 				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
   5341 				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
   5342 				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
   5343 				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
   5344 				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
   5345 				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
   5346 				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
   5347 
   5348 				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
   5349 				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
   5350 				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
   5351 				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
   5352 				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
   5353 				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
   5354 				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
   5355 				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
   5356 				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
   5357 
   5358 			drm_dbg_kms(&dev_priv->drm,
   5359 				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
   5360 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
   5361 				    plane->base.base.id, plane->base.name,
   5362 				    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
   5363 				    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
   5364 				    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
   5365 				    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
   5366 				    old_wm->trans_wm.plane_res_b,
   5367 				    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
   5368 				    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
   5369 				    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
   5370 				    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
   5371 				    new_wm->trans_wm.plane_res_b);
   5372 
   5373 			drm_dbg_kms(&dev_priv->drm,
   5374 				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
   5375 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
   5376 				    plane->base.base.id, plane->base.name,
   5377 				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
   5378 				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
   5379 				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
   5380 				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
   5381 				    old_wm->trans_wm.min_ddb_alloc,
   5382 				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
   5383 				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
   5384 				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
   5385 				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
   5386 				    new_wm->trans_wm.min_ddb_alloc);
   5387 		}
   5388 	}
   5389 }
   5390 
   5391 static int intel_add_all_pipes(struct intel_atomic_state *state)
   5392 {
   5393 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   5394 	struct intel_crtc *crtc;
   5395 
   5396 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   5397 		struct intel_crtc_state *crtc_state;
   5398 
   5399 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
   5400 		if (IS_ERR(crtc_state))
   5401 			return PTR_ERR(crtc_state);
   5402 	}
   5403 
   5404 	return 0;
   5405 }
   5406 
   5407 static int
   5408 skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
   5409 {
   5410 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   5411 	int ret;
   5412 
   5413 	/*
   5414 	 * If this is our first atomic update following hardware readout,
   5415 	 * we can't trust the DDB that the BIOS programmed for us.  Let's
   5416 	 * pretend that all pipes switched active status so that we'll
   5417 	 * ensure a full DDB recompute.
   5418 	 */
   5419 	if (dev_priv->wm.distrust_bios_wm) {
   5420 		ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
   5421 				       state->base.acquire_ctx);
   5422 		if (ret)
   5423 			return ret;
   5424 
   5425 		state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
   5426 
   5427 		/*
    5428 		 * We usually only initialize state->active_pipes if we're
    5429 		 * doing a modeset; make sure this field is always
   5430 		 * initialized during the sanitization process that happens
   5431 		 * on the first commit too.
   5432 		 */
   5433 		if (!state->modeset)
   5434 			state->active_pipes = dev_priv->active_pipes;
   5435 	}
   5436 
   5437 	/*
   5438 	 * If the modeset changes which CRTC's are active, we need to
   5439 	 * recompute the DDB allocation for *all* active pipes, even
   5440 	 * those that weren't otherwise being modified in any way by this
   5441 	 * atomic commit.  Due to the shrinking of the per-pipe allocations
   5442 	 * when new active CRTC's are added, it's possible for a pipe that
   5443 	 * we were already using and aren't changing at all here to suddenly
    5444 	 * become invalid if its DDB needs exceed its new allocation.
   5445 	 *
   5446 	 * Note that if we wind up doing a full DDB recompute, we can't let
   5447 	 * any other display updates race with this transaction, so we need
   5448 	 * to grab the lock on *all* CRTC's.
   5449 	 */
   5450 	if (state->active_pipe_changes || state->modeset) {
   5451 		state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
   5452 
   5453 		ret = intel_add_all_pipes(state);
   5454 		if (ret)
   5455 			return ret;
   5456 	}
   5457 
   5458 	return 0;
   5459 }
   5460 
   5461 /*
   5462  * To make sure the cursor watermark registers are always consistent
   5463  * with our computed state the following scenario needs special
   5464  * treatment:
   5465  *
   5466  * 1. enable cursor
   5467  * 2. move cursor entirely offscreen
   5468  * 3. disable cursor
   5469  *
   5470  * Step 2. does call .disable_plane() but does not zero the watermarks
   5471  * (since we consider an offscreen cursor still active for the purposes
   5472  * of watermarks). Step 3. would not normally call .disable_plane()
   5473  * because the actual plane visibility isn't changing, and we don't
   5474  * deallocate the cursor ddb until the pipe gets disabled. So we must
   5475  * force step 3. to call .disable_plane() to update the watermark
   5476  * registers properly.
   5477  *
    5478  * Other planes do not suffer from this issue as their watermarks are
   5479  * calculated based on the actual plane visibility. The only time this
   5480  * can trigger for the other planes is during the initial readout as the
   5481  * default value of the watermarks registers is not zero.
   5482  */
   5483 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
   5484 				      struct intel_crtc *crtc)
   5485 {
   5486 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5487 	const struct intel_crtc_state *old_crtc_state =
   5488 		intel_atomic_get_old_crtc_state(state, crtc);
   5489 	struct intel_crtc_state *new_crtc_state =
   5490 		intel_atomic_get_new_crtc_state(state, crtc);
   5491 	struct intel_plane *plane;
   5492 
   5493 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   5494 		struct intel_plane_state *plane_state;
   5495 		enum plane_id plane_id = plane->id;
   5496 
   5497 		/*
   5498 		 * Force a full wm update for every plane on modeset.
   5499 		 * Required because the reset value of the wm registers
   5500 		 * is non-zero, whereas we want all disabled planes to
   5501 		 * have zero watermarks. So if we turn off the relevant
   5502 		 * power well the hardware state will go out of sync
   5503 		 * with the software state.
   5504 		 */
   5505 		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
   5506 		    skl_plane_wm_equals(dev_priv,
   5507 					&old_crtc_state->wm.skl.optimal.planes[plane_id],
   5508 					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
   5509 			continue;
   5510 
   5511 		plane_state = intel_atomic_get_plane_state(state, plane);
   5512 		if (IS_ERR(plane_state))
   5513 			return PTR_ERR(plane_state);
   5514 
   5515 		new_crtc_state->update_planes |= BIT(plane_id);
   5516 	}
   5517 
   5518 	return 0;
   5519 }
   5520 
   5521 static int
   5522 skl_compute_wm(struct intel_atomic_state *state)
   5523 {
   5524 	struct intel_crtc *crtc;
   5525 	struct intel_crtc_state *new_crtc_state;
   5526 	struct intel_crtc_state *old_crtc_state;
   5527 	struct skl_ddb_values *results = &state->wm_results;
   5528 	int ret, i;
   5529 
   5530 	/* Clear all dirty flags */
   5531 	results->dirty_pipes = 0;
   5532 
   5533 	ret = skl_ddb_add_affected_pipes(state);
   5534 	if (ret)
   5535 		return ret;
   5536 
   5537 	/*
   5538 	 * Calculate WM's for all pipes that are part of this transaction.
   5539 	 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
   5540 	 * weren't otherwise being modified (and set bits in dirty_pipes) if
   5541 	 * pipe allocations had to change.
   5542 	 */
   5543 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   5544 					    new_crtc_state, i) {
   5545 		ret = skl_build_pipe_wm(new_crtc_state);
   5546 		if (ret)
   5547 			return ret;
   5548 
   5549 		ret = skl_wm_add_affected_planes(state, crtc);
   5550 		if (ret)
   5551 			return ret;
   5552 
   5553 		if (!skl_pipe_wm_equals(crtc,
   5554 					&old_crtc_state->wm.skl.optimal,
   5555 					&new_crtc_state->wm.skl.optimal))
   5556 			results->dirty_pipes |= BIT(crtc->pipe);
   5557 	}
   5558 
   5559 	ret = skl_compute_ddb(state);
   5560 	if (ret)
   5561 		return ret;
   5562 
   5563 	skl_print_wm_changes(state);
   5564 
   5565 	return 0;
   5566 }
   5567 
   5568 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
   5569 				      struct intel_crtc *crtc)
   5570 {
   5571 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5572 	const struct intel_crtc_state *crtc_state =
   5573 		intel_atomic_get_new_crtc_state(state, crtc);
   5574 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
   5575 	enum pipe pipe = crtc->pipe;
   5576 
   5577 	if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
   5578 		return;
   5579 
   5580 	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
   5581 }
   5582 
   5583 static void skl_initial_wm(struct intel_atomic_state *state,
   5584 			   struct intel_crtc *crtc)
   5585 {
   5586 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5587 	const struct intel_crtc_state *crtc_state =
   5588 		intel_atomic_get_new_crtc_state(state, crtc);
   5589 	struct skl_ddb_values *results = &state->wm_results;
   5590 
   5591 	if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
   5592 		return;
   5593 
   5594 	mutex_lock(&dev_priv->wm.wm_mutex);
   5595 
   5596 	if (crtc_state->uapi.active_changed)
   5597 		skl_atomic_update_crtc_wm(state, crtc);
   5598 
   5599 	mutex_unlock(&dev_priv->wm.wm_mutex);
   5600 }
   5601 
   5602 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
   5603 				  struct intel_wm_config *config)
   5604 {
   5605 	struct intel_crtc *crtc;
   5606 
   5607 	/* Compute the currently _active_ config */
   5608 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   5609 		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
   5610 
   5611 		if (!wm->pipe_enabled)
   5612 			continue;
   5613 
   5614 		config->sprites_enabled |= wm->sprites_enabled;
   5615 		config->sprites_scaled |= wm->sprites_scaled;
   5616 		config->num_pipes_active++;
   5617 	}
   5618 }
   5619 
   5620 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
   5621 {
   5622 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
   5623 	struct ilk_wm_maximums max;
   5624 	struct intel_wm_config config = {};
   5625 	struct ilk_wm_values results = {};
   5626 	enum intel_ddb_partitioning partitioning;
   5627 
   5628 	ilk_compute_wm_config(dev_priv, &config);
   5629 
   5630 	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
   5631 	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
   5632 
   5633 	/* 5/6 split only in single pipe config on IVB+ */
   5634 	if (INTEL_GEN(dev_priv) >= 7 &&
   5635 	    config.num_pipes_active == 1 && config.sprites_enabled) {
   5636 		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
   5637 		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
   5638 
   5639 		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
   5640 	} else {
   5641 		best_lp_wm = &lp_wm_1_2;
   5642 	}
   5643 
   5644 	partitioning = (best_lp_wm == &lp_wm_1_2) ?
   5645 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
   5646 
   5647 	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
   5648 
   5649 	ilk_write_wm_values(dev_priv, &results);
   5650 }
   5651 
   5652 static void ilk_initial_watermarks(struct intel_atomic_state *state,
   5653 				   struct intel_crtc *crtc)
   5654 {
   5655 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5656 	const struct intel_crtc_state *crtc_state =
   5657 		intel_atomic_get_new_crtc_state(state, crtc);
   5658 
   5659 	mutex_lock(&dev_priv->wm.wm_mutex);
   5660 	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
   5661 	ilk_program_watermarks(dev_priv);
   5662 	mutex_unlock(&dev_priv->wm.wm_mutex);
   5663 }
   5664 
   5665 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
   5666 				    struct intel_crtc *crtc)
   5667 {
   5668 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5669 	const struct intel_crtc_state *crtc_state =
   5670 		intel_atomic_get_new_crtc_state(state, crtc);
   5671 
   5672 	if (!crtc_state->wm.need_postvbl_update)
   5673 		return;
   5674 
   5675 	mutex_lock(&dev_priv->wm.wm_mutex);
   5676 	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
   5677 	ilk_program_watermarks(dev_priv);
   5678 	mutex_unlock(&dev_priv->wm.wm_mutex);
   5679 }
   5680 
   5681 static inline void skl_wm_level_from_reg_val(u32 val,
   5682 					     struct skl_wm_level *level)
   5683 {
   5684 	level->plane_en = val & PLANE_WM_EN;
   5685 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
   5686 	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
   5687 	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
   5688 		PLANE_WM_LINES_MASK;
   5689 }
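         /*
          * skl_wm_level_from_reg_val() mirrors skl_write_wm_level(): a value
          * written by one decodes to the same plane_en/ignore_lines/blocks/
          * lines tuple in the other, so levels read back by the hardware
          * state readout below can be compared directly against computed
          * state (e.g. via skl_wm_level_equals()).
          */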
   5690 
   5691 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
   5692 			      struct skl_pipe_wm *out)
   5693 {
   5694 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5695 	enum pipe pipe = crtc->pipe;
   5696 	int level, max_level;
   5697 	enum plane_id plane_id;
   5698 	u32 val;
   5699 
   5700 	max_level = ilk_wm_max_level(dev_priv);
   5701 
   5702 	for_each_plane_id_on_crtc(crtc, plane_id) {
   5703 		struct skl_plane_wm *wm = &out->planes[plane_id];
   5704 
   5705 		for (level = 0; level <= max_level; level++) {
   5706 			if (plane_id != PLANE_CURSOR)
   5707 				val = I915_READ(PLANE_WM(pipe, plane_id, level));
   5708 			else
   5709 				val = I915_READ(CUR_WM(pipe, level));
   5710 
   5711 			skl_wm_level_from_reg_val(val, &wm->wm[level]);
   5712 		}
   5713 
   5714 		if (plane_id != PLANE_CURSOR)
   5715 			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
   5716 		else
   5717 			val = I915_READ(CUR_WM_TRANS(pipe));
   5718 
   5719 		skl_wm_level_from_reg_val(val, &wm->trans_wm);
   5720 	}
   5721 
   5722 	if (!crtc->active)
   5723 		return;
   5724 
   5725 	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
   5726 }
   5727 
   5728 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
   5729 {
   5730 	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
   5731 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
   5732 	struct intel_crtc *crtc;
   5733 	struct intel_crtc_state *crtc_state;
   5734 
   5735 	skl_ddb_get_hw_state(dev_priv, ddb);
   5736 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   5737 		crtc_state = to_intel_crtc_state(crtc->base.state);
   5738 
   5739 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
   5740 
   5741 		if (crtc->active)
   5742 			hw->dirty_pipes |= BIT(crtc->pipe);
   5743 	}
   5744 
   5745 	if (dev_priv->active_pipes) {
   5746 		/* Fully recompute DDB on first atomic commit */
   5747 		dev_priv->wm.distrust_bios_wm = true;
   5748 	}
   5749 }
   5750 
   5751 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
   5752 {
   5753 	struct drm_device *dev = crtc->base.dev;
   5754 	struct drm_i915_private *dev_priv = to_i915(dev);
   5755 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
   5756 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
   5757 	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
   5758 	enum pipe pipe = crtc->pipe;
   5759 	static const i915_reg_t wm0_pipe_reg[] = {
   5760 		[PIPE_A] = WM0_PIPEA_ILK,
   5761 		[PIPE_B] = WM0_PIPEB_ILK,
   5762 		[PIPE_C] = WM0_PIPEC_IVB,
   5763 	};
   5764 
   5765 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
   5766 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
   5767 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
   5768 
   5769 	memset(active, 0, sizeof(*active));
   5770 
   5771 	active->pipe_enabled = crtc->active;
   5772 
   5773 	if (active->pipe_enabled) {
   5774 		u32 tmp = hw->wm_pipe[pipe];
   5775 
   5776 		/*
   5777 		 * For active pipes LP0 watermark is marked as
    5778 		 * enabled, and LP1+ watermarks as disabled since
   5779 		 * we can't really reverse compute them in case
   5780 		 * multiple pipes are active.
   5781 		 */
   5782 		active->wm[0].enable = true;
   5783 		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
   5784 		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
   5785 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
   5786 		active->linetime = hw->wm_linetime[pipe];
   5787 	} else {
   5788 		int level, max_level = ilk_wm_max_level(dev_priv);
   5789 
   5790 		/*
   5791 		 * For inactive pipes, all watermark levels
   5792 		 * should be marked as enabled but zeroed,
   5793 		 * which is what we'd compute them to.
   5794 		 */
   5795 		for (level = 0; level <= max_level; level++)
   5796 			active->wm[level].enable = true;
   5797 	}
   5798 
   5799 	crtc->wm.active.ilk = *active;
   5800 }
   5801 
   5802 #define _FW_WM(value, plane) \
   5803 	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
   5804 #define _FW_WM_VLV(value, plane) \
   5805 	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
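         /*
          * Example expansion: _FW_WM(tmp, SR) becomes
          * ((tmp) & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT, i.e. each helper just
          * extracts one named watermark field from a DSPFW register value.
          */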
   5806 
   5807 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
   5808 			       struct g4x_wm_values *wm)
   5809 {
   5810 	u32 tmp;
   5811 
   5812 	tmp = I915_READ(DSPFW1);
   5813 	wm->sr.plane = _FW_WM(tmp, SR);
   5814 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
   5815 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
   5816 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
   5817 
   5818 	tmp = I915_READ(DSPFW2);
   5819 	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
   5820 	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
   5821 	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
   5822 	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
   5823 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
   5824 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
   5825 
   5826 	tmp = I915_READ(DSPFW3);
   5827 	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
   5828 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
   5829 	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
   5830 	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
   5831 }
   5832 
   5833 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
   5834 			       struct vlv_wm_values *wm)
   5835 {
   5836 	enum pipe pipe;
   5837 	u32 tmp;
   5838 
   5839 	for_each_pipe(dev_priv, pipe) {
   5840 		tmp = I915_READ(VLV_DDL(pipe));
   5841 
   5842 		wm->ddl[pipe].plane[PLANE_PRIMARY] =
   5843 			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
   5844 		wm->ddl[pipe].plane[PLANE_CURSOR] =
   5845 			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
   5846 		wm->ddl[pipe].plane[PLANE_SPRITE0] =
   5847 			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
   5848 		wm->ddl[pipe].plane[PLANE_SPRITE1] =
   5849 			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
   5850 	}
   5851 
   5852 	tmp = I915_READ(DSPFW1);
   5853 	wm->sr.plane = _FW_WM(tmp, SR);
   5854 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
   5855 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
   5856 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
   5857 
   5858 	tmp = I915_READ(DSPFW2);
   5859 	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
   5860 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
   5861 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
   5862 
   5863 	tmp = I915_READ(DSPFW3);
   5864 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
   5865 
   5866 	if (IS_CHERRYVIEW(dev_priv)) {
   5867 		tmp = I915_READ(DSPFW7_CHV);
   5868 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
   5869 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
   5870 
   5871 		tmp = I915_READ(DSPFW8_CHV);
   5872 		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
   5873 		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
   5874 
   5875 		tmp = I915_READ(DSPFW9_CHV);
   5876 		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
   5877 		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
   5878 
   5879 		tmp = I915_READ(DSPHOWM);
   5880 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
   5881 		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
   5882 		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
   5883 		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
   5884 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
   5885 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
   5886 		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
   5887 		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
   5888 		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
   5889 		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
   5890 	} else {
   5891 		tmp = I915_READ(DSPFW7);
   5892 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
   5893 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
   5894 
   5895 		tmp = I915_READ(DSPHOWM);
   5896 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
   5897 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
   5898 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
   5899 		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
   5900 		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
   5901 		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
   5902 		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
   5903 	}
   5904 }
   5905 
   5906 #undef _FW_WM
   5907 #undef _FW_WM_VLV
   5908 
   5909 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
   5910 {
   5911 	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
   5912 	struct intel_crtc *crtc;
   5913 
   5914 	g4x_read_wm_values(dev_priv, wm);
   5915 
   5916 	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
   5917 
   5918 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   5919 		struct intel_crtc_state *crtc_state =
   5920 			to_intel_crtc_state(crtc->base.state);
   5921 		struct g4x_wm_state *active = &crtc->wm.active.g4x;
   5922 		struct g4x_pipe_wm *raw;
   5923 		enum pipe pipe = crtc->pipe;
   5924 		enum plane_id plane_id;
   5925 		int level, max_level;
   5926 
   5927 		active->cxsr = wm->cxsr;
   5928 		active->hpll_en = wm->hpll_en;
   5929 		active->fbc_en = wm->fbc_en;
   5930 
   5931 		active->sr = wm->sr;
   5932 		active->hpll = wm->hpll;
   5933 
   5934 		for_each_plane_id_on_crtc(crtc, plane_id) {
   5935 			active->wm.plane[plane_id] =
   5936 				wm->pipe[pipe].plane[plane_id];
   5937 		}
   5938 
   5939 		if (wm->cxsr && wm->hpll_en)
   5940 			max_level = G4X_WM_LEVEL_HPLL;
   5941 		else if (wm->cxsr)
   5942 			max_level = G4X_WM_LEVEL_SR;
   5943 		else
   5944 			max_level = G4X_WM_LEVEL_NORMAL;
   5945 
   5946 		level = G4X_WM_LEVEL_NORMAL;
   5947 		raw = &crtc_state->wm.g4x.raw[level];
   5948 		for_each_plane_id_on_crtc(crtc, plane_id)
   5949 			raw->plane[plane_id] = active->wm.plane[plane_id];
   5950 
   5951 		if (++level > max_level)
   5952 			goto out;
   5953 
   5954 		raw = &crtc_state->wm.g4x.raw[level];
   5955 		raw->plane[PLANE_PRIMARY] = active->sr.plane;
   5956 		raw->plane[PLANE_CURSOR] = active->sr.cursor;
   5957 		raw->plane[PLANE_SPRITE0] = 0;
   5958 		raw->fbc = active->sr.fbc;
   5959 
   5960 		if (++level > max_level)
   5961 			goto out;
   5962 
   5963 		raw = &crtc_state->wm.g4x.raw[level];
   5964 		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
   5965 		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
   5966 		raw->plane[PLANE_SPRITE0] = 0;
   5967 		raw->fbc = active->hpll.fbc;
   5968 
   5969 	out:
   5970 		for_each_plane_id_on_crtc(crtc, plane_id)
   5971 			g4x_raw_plane_wm_set(crtc_state, level,
   5972 					     plane_id, USHRT_MAX);
   5973 		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
   5974 
   5975 		crtc_state->wm.g4x.optimal = *active;
   5976 		crtc_state->wm.g4x.intermediate = *active;
   5977 
   5978 		drm_dbg_kms(&dev_priv->drm,
   5979 			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
   5980 			    pipe_name(pipe),
   5981 			    wm->pipe[pipe].plane[PLANE_PRIMARY],
   5982 			    wm->pipe[pipe].plane[PLANE_CURSOR],
   5983 			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
   5984 	}
   5985 
   5986 	drm_dbg_kms(&dev_priv->drm,
   5987 		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
   5988 		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
   5989 	drm_dbg_kms(&dev_priv->drm,
   5990 		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
   5991 		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
   5992 	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
   5993 		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
   5994 }
   5995 
   5996 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
   5997 {
   5998 	struct intel_plane *plane;
   5999 	struct intel_crtc *crtc;
   6000 
   6001 	mutex_lock(&dev_priv->wm.wm_mutex);
   6002 
   6003 	for_each_intel_plane(&dev_priv->drm, plane) {
   6004 		struct intel_crtc *crtc =
   6005 			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
   6006 		struct intel_crtc_state *crtc_state =
   6007 			to_intel_crtc_state(crtc->base.state);
   6008 		struct intel_plane_state *plane_state =
   6009 			to_intel_plane_state(plane->base.state);
   6010 		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
   6011 		enum plane_id plane_id = plane->id;
   6012 		int level;
   6013 
   6014 		if (plane_state->uapi.visible)
   6015 			continue;
   6016 
   6017 		for (level = 0; level < 3; level++) {
   6018 			struct g4x_pipe_wm *raw =
   6019 				&crtc_state->wm.g4x.raw[level];
   6020 
   6021 			raw->plane[plane_id] = 0;
   6022 			wm_state->wm.plane[plane_id] = 0;
   6023 		}
   6024 
   6025 		if (plane_id == PLANE_PRIMARY) {
   6026 			for (level = 0; level < 3; level++) {
   6027 				struct g4x_pipe_wm *raw =
   6028 					&crtc_state->wm.g4x.raw[level];
   6029 				raw->fbc = 0;
   6030 			}
   6031 
   6032 			wm_state->sr.fbc = 0;
   6033 			wm_state->hpll.fbc = 0;
   6034 			wm_state->fbc_en = false;
   6035 		}
   6036 	}
   6037 
   6038 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   6039 		struct intel_crtc_state *crtc_state =
   6040 			to_intel_crtc_state(crtc->base.state);
   6041 
   6042 		crtc_state->wm.g4x.intermediate =
   6043 			crtc_state->wm.g4x.optimal;
   6044 		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
   6045 	}
   6046 
   6047 	g4x_program_watermarks(dev_priv);
   6048 
   6049 	mutex_unlock(&dev_priv->wm.wm_mutex);
   6050 }
   6051 
   6052 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
   6053 {
   6054 	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
   6055 	struct intel_crtc *crtc;
   6056 	u32 val;
   6057 
   6058 	vlv_read_wm_values(dev_priv, wm);
   6059 
   6060 	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
   6061 	wm->level = VLV_WM_LEVEL_PM2;
   6062 
   6063 	if (IS_CHERRYVIEW(dev_priv)) {
   6064 		vlv_punit_get(dev_priv);
   6065 
   6066 		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
   6067 		if (val & DSP_MAXFIFO_PM5_ENABLE)
   6068 			wm->level = VLV_WM_LEVEL_PM5;
   6069 
   6070 		/*
    6071 		 * If DDR DVFS is disabled in the BIOS, the Punit
    6072 		 * will never ack the request, so if that happens,
    6073 		 * assume we don't have to enable/disable DDR DVFS
    6074 		 * dynamically. To test for that, just set the REQ_ACK
    6075 		 * bit to poke the Punit, but don't change the
    6076 		 * HIGH/LOW bits, so that we don't actually change
    6077 		 * the current state.
   6078 		 */
   6079 		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
   6080 		val |= FORCE_DDR_FREQ_REQ_ACK;
   6081 		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
   6082 
   6083 		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
   6084 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
   6085 			drm_dbg_kms(&dev_priv->drm,
   6086 				    "Punit not acking DDR DVFS request, "
   6087 				    "assuming DDR DVFS is disabled\n");
   6088 			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
   6089 		} else {
   6090 			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
   6091 			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
   6092 				wm->level = VLV_WM_LEVEL_DDR_DVFS;
   6093 		}
   6094 
   6095 		vlv_punit_put(dev_priv);
   6096 	}
   6097 
   6098 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   6099 		struct intel_crtc_state *crtc_state =
   6100 			to_intel_crtc_state(crtc->base.state);
   6101 		struct vlv_wm_state *active = &crtc->wm.active.vlv;
   6102 		const struct vlv_fifo_state *fifo_state =
   6103 			&crtc_state->wm.vlv.fifo_state;
   6104 		enum pipe pipe = crtc->pipe;
   6105 		enum plane_id plane_id;
   6106 		int level;
   6107 
   6108 		vlv_get_fifo_size(crtc_state);
   6109 
   6110 		active->num_levels = wm->level + 1;
   6111 		active->cxsr = wm->cxsr;
   6112 
   6113 		for (level = 0; level < active->num_levels; level++) {
   6114 			struct g4x_pipe_wm *raw =
   6115 				&crtc_state->wm.vlv.raw[level];
   6116 
   6117 			active->sr[level].plane = wm->sr.plane;
   6118 			active->sr[level].cursor = wm->sr.cursor;
   6119 
   6120 			for_each_plane_id_on_crtc(crtc, plane_id) {
   6121 				active->wm[level].plane[plane_id] =
   6122 					wm->pipe[pipe].plane[plane_id];
   6123 
   6124 				raw->plane[plane_id] =
   6125 					vlv_invert_wm_value(active->wm[level].plane[plane_id],
   6126 							    fifo_state->plane[plane_id]);
   6127 			}
   6128 		}
   6129 
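         		/*
         		 * Invalidate any watermark levels beyond the last one
         		 * enabled in hardware.
         		 */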
   6130 		for_each_plane_id_on_crtc(crtc, plane_id)
   6131 			vlv_raw_plane_wm_set(crtc_state, level,
   6132 					     plane_id, USHRT_MAX);
   6133 		vlv_invalidate_wms(crtc, active, level);
   6134 
   6135 		crtc_state->wm.vlv.optimal = *active;
   6136 		crtc_state->wm.vlv.intermediate = *active;
   6137 
   6138 		drm_dbg_kms(&dev_priv->drm,
   6139 			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
   6140 			    pipe_name(pipe),
   6141 			    wm->pipe[pipe].plane[PLANE_PRIMARY],
   6142 			    wm->pipe[pipe].plane[PLANE_CURSOR],
   6143 			    wm->pipe[pipe].plane[PLANE_SPRITE0],
   6144 			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
   6145 	}
   6146 
   6147 	drm_dbg_kms(&dev_priv->drm,
    6148 		    "Initial watermarks: SR plane=%d, SR cursor=%d, level=%d, cxsr=%d\n",
   6149 		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
   6150 }
   6151 
   6152 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
   6153 {
   6154 	struct intel_plane *plane;
   6155 	struct intel_crtc *crtc;
   6156 
   6157 	mutex_lock(&dev_priv->wm.wm_mutex);
   6158 
   6159 	for_each_intel_plane(&dev_priv->drm, plane) {
   6160 		struct intel_crtc *crtc =
   6161 			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
   6162 		struct intel_crtc_state *crtc_state =
   6163 			to_intel_crtc_state(crtc->base.state);
   6164 		struct intel_plane_state *plane_state =
   6165 			to_intel_plane_state(plane->base.state);
   6166 		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
   6167 		const struct vlv_fifo_state *fifo_state =
   6168 			&crtc_state->wm.vlv.fifo_state;
   6169 		enum plane_id plane_id = plane->id;
   6170 		int level;
   6171 
   6172 		if (plane_state->uapi.visible)
   6173 			continue;
   6174 
   6175 		for (level = 0; level < wm_state->num_levels; level++) {
   6176 			struct g4x_pipe_wm *raw =
   6177 				&crtc_state->wm.vlv.raw[level];
   6178 
   6179 			raw->plane[plane_id] = 0;
   6180 
   6181 			wm_state->wm[level].plane[plane_id] =
   6182 				vlv_invert_wm_value(raw->plane[plane_id],
   6183 						    fifo_state->plane[plane_id]);
   6184 		}
   6185 	}
   6186 
   6187 	for_each_intel_crtc(&dev_priv->drm, crtc) {
   6188 		struct intel_crtc_state *crtc_state =
   6189 			to_intel_crtc_state(crtc->base.state);
   6190 
   6191 		crtc_state->wm.vlv.intermediate =
   6192 			crtc_state->wm.vlv.optimal;
   6193 		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
   6194 	}
   6195 
   6196 	vlv_program_watermarks(dev_priv);
   6197 
   6198 	mutex_unlock(&dev_priv->wm.wm_mutex);
   6199 }
   6200 
   6201 /*
    6202  * FIXME: we should probably kill this and improve
    6203  * the real watermark readout/sanitization instead.
   6204  */
   6205 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
   6206 {
   6207 	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
   6208 	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
   6209 	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
   6210 
   6211 	/*
   6212 	 * Don't touch WM1S_LP_EN here.
   6213 	 * Doing so could cause underruns.
   6214 	 */
   6215 }
   6216 
   6217 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
   6218 {
   6219 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
   6220 	struct intel_crtc *crtc;
   6221 
   6222 	ilk_init_lp_watermarks(dev_priv);
   6223 
   6224 	for_each_intel_crtc(&dev_priv->drm, crtc)
   6225 		ilk_pipe_wm_get_hw_state(crtc);
   6226 
   6227 	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
   6228 	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
   6229 	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
   6230 
   6231 	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
   6232 	if (INTEL_GEN(dev_priv) >= 7) {
   6233 		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
   6234 		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
   6235 	}
   6236 
   6237 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
   6238 		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
   6239 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
   6240 	else if (IS_IVYBRIDGE(dev_priv))
   6241 		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
   6242 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
   6243 
   6244 	hw->enable_fbc_wm =
   6245 		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
   6246 }
   6247 
   6248 /**
   6249  * intel_update_watermarks - update FIFO watermark values based on current modes
   6250  * @crtc: the #intel_crtc on which to compute the WM
   6251  *
   6252  * Calculate watermark values for the various WM regs based on current mode
   6253  * and plane configuration.
   6254  *
   6255  * There are several cases to deal with here:
   6256  *   - normal (i.e. non-self-refresh)
   6257  *   - self-refresh (SR) mode
    6258  *   - lines are large relative to FIFO size (buffer can hold up to 2 lines)
   6259  *   - lines are small relative to FIFO size (buffer can hold more than 2
   6260  *     lines), so need to account for TLB latency
   6261  *
   6262  *   The normal calculation is:
   6263  *     watermark = dotclock * bytes per pixel * latency
   6264  *   where latency is platform & configuration dependent (we assume pessimal
   6265  *   values here).
   6266  *
   6267  *   The SR calculation is:
   6268  *     watermark = (trunc(latency/line time)+1) * surface width *
   6269  *       bytes per pixel
   6270  *   where
   6271  *     line time = htotal / dotclock
   6272  *     surface width = hdisplay for normal plane and 64 for cursor
   6273  *   and latency is assumed to be high, as above.
   6274  *
   6275  * The final value programmed to the register should always be rounded up,
   6276  * and include an extra 2 entries to account for clock crossings.
   6277  *
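          * For example, assuming a hypothetical 148.5 MHz dot clock with 4
          * bytes per pixel and 12 us of latency, the normal watermark works
          * out to 148500000 * 4 * 0.000012 ~= 7128 bytes.  With an htotal of
          * 2200, the line time is ~14.8 us, so a 20 us latency in SR mode
          * gives (trunc(20/14.8)+1) * 1920 * 4 = 15360 bytes for a 1920
          * pixel wide plane.
          *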
   6278  * We don't use the sprite, so we can ignore that.  And on Crestline we have
   6279  * to set the non-SR watermarks to 8.
   6280  */
   6281 void intel_update_watermarks(struct intel_crtc *crtc)
   6282 {
   6283 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6284 
   6285 	if (dev_priv->display.update_wm)
   6286 		dev_priv->display.update_wm(crtc);
   6287 }
   6288 
   6289 void intel_enable_ipc(struct drm_i915_private *dev_priv)
   6290 {
   6291 	u32 val;
   6292 
   6293 	if (!HAS_IPC(dev_priv))
   6294 		return;
   6295 
   6296 	val = I915_READ(DISP_ARB_CTL2);
   6297 
   6298 	if (dev_priv->ipc_enabled)
   6299 		val |= DISP_IPC_ENABLE;
   6300 	else
   6301 		val &= ~DISP_IPC_ENABLE;
   6302 
   6303 	I915_WRITE(DISP_ARB_CTL2, val);
   6304 }
   6305 
   6306 static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
   6307 {
   6308 	/* Display WA #0477 WaDisableIPC: skl */
   6309 	if (IS_SKYLAKE(dev_priv))
   6310 		return false;
   6311 
   6312 	/* Display WA #1141: SKL:all KBL:all CFL */
   6313 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
   6314 		return dev_priv->dram_info.symmetric_memory;
   6315 
   6316 	return true;
   6317 }
   6318 
   6319 void intel_init_ipc(struct drm_i915_private *dev_priv)
   6320 {
   6321 	if (!HAS_IPC(dev_priv))
   6322 		return;
   6323 
   6324 	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
   6325 
   6326 	intel_enable_ipc(dev_priv);
   6327 }
   6328 
   6329 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
   6330 {
   6331 	/*
   6332 	 * On Ibex Peak and Cougar Point, we need to disable clock
   6333 	 * gating for the panel power sequencer or it will fail to
   6334 	 * start up when no ports are active.
   6335 	 */
   6336 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
   6337 }
   6338 
   6339 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
   6340 {
   6341 	enum pipe pipe;
   6342 
   6343 	for_each_pipe(dev_priv, pipe) {
   6344 		I915_WRITE(DSPCNTR(pipe),
   6345 			   I915_READ(DSPCNTR(pipe)) |
   6346 			   DISPPLANE_TRICKLE_FEED_DISABLE);
   6347 
   6348 		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
   6349 		POSTING_READ(DSPSURF(pipe));
   6350 	}
   6351 }
   6352 
   6353 static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
   6354 {
   6355 	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
   6356 
   6357 	/*
   6358 	 * Required for FBC
   6359 	 * WaFbcDisableDpfcClockGating:ilk
   6360 	 */
   6361 	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
   6362 		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
   6363 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
   6364 
   6365 	I915_WRITE(PCH_3DCGDIS0,
   6366 		   MARIUNIT_CLOCK_GATE_DISABLE |
   6367 		   SVSMUNIT_CLOCK_GATE_DISABLE);
   6368 	I915_WRITE(PCH_3DCGDIS1,
   6369 		   VFMUNIT_CLOCK_GATE_DISABLE);
   6370 
   6371 	/*
    6372 	 * According to the spec, the following bits should be set in
    6373 	 * order to enable memory self-refresh:
    6374 	 *   bit 22/21 of 0x42004
    6375 	 *   bit 5 of 0x42020
    6376 	 *   bit 15 of 0x45000
   6377 	 */
   6378 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
   6379 		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
   6380 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
   6381 	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
   6382 	I915_WRITE(DISP_ARB_CTL,
   6383 		   (I915_READ(DISP_ARB_CTL) |
   6384 		    DISP_FBC_WM_DIS));
   6385 
   6386 	/*
    6387 	 * Based on the documentation from the hardware team, the following
    6388 	 * bits should be set unconditionally in order to enable FBC:
    6389 	 *   bit 22 of 0x42000
    6390 	 *   bit 22 of 0x42004
    6391 	 *   bits 7, 8 and 9 of 0x42020
   6392 	 */
   6393 	if (IS_IRONLAKE_M(dev_priv)) {
   6394 		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
   6395 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
   6396 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
   6397 			   ILK_FBCQ_DIS);
   6398 		I915_WRITE(ILK_DISPLAY_CHICKEN2,
   6399 			   I915_READ(ILK_DISPLAY_CHICKEN2) |
   6400 			   ILK_DPARB_GATE);
   6401 	}
   6402 
   6403 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
   6404 
   6405 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
   6406 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
   6407 		   ILK_ELPIN_409_SELECT);
   6408 	I915_WRITE(_3D_CHICKEN2,
   6409 		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
   6410 		   _3D_CHICKEN2_WM_READ_PIPELINED);
   6411 
   6412 	/* WaDisableRenderCachePipelinedFlush:ilk */
   6413 	I915_WRITE(CACHE_MODE_0,
   6414 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
   6415 
   6416 	/* WaDisable_RenderCache_OperationalFlush:ilk */
   6417 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   6418 
   6419 	g4x_disable_trickle_feed(dev_priv);
   6420 
   6421 	ibx_init_clock_gating(dev_priv);
   6422 }
   6423 
   6424 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
   6425 {
   6426 	enum pipe pipe;
   6427 	u32 val;
   6428 
   6429 	/*
   6430 	 * On Ibex Peak and Cougar Point, we need to disable clock
   6431 	 * gating for the panel power sequencer or it will fail to
   6432 	 * start up when no ports are active.
   6433 	 */
   6434 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
   6435 		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
   6436 		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
   6437 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
   6438 		   DPLS_EDP_PPS_FIX_DIS);
    6439 	/* The below fixes a weird display corruption (a few pixels shifted
    6440 	 * downward) seen only on the LVDS panels of some HP laptops with IVY.
   6441 	 */
   6442 	for_each_pipe(dev_priv, pipe) {
   6443 		val = I915_READ(TRANS_CHICKEN2(pipe));
   6444 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
   6445 		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
   6446 		if (dev_priv->vbt.fdi_rx_polarity_inverted)
   6447 			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
   6448 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
   6449 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
   6450 		I915_WRITE(TRANS_CHICKEN2(pipe), val);
   6451 	}
   6452 	/* WADP0ClockGatingDisable */
   6453 	for_each_pipe(dev_priv, pipe) {
   6454 		I915_WRITE(TRANS_CHICKEN1(pipe),
   6455 			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
   6456 	}
   6457 }
   6458 
   6459 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
   6460 {
   6461 	u32 tmp;
   6462 
   6463 	tmp = I915_READ(MCH_SSKPD);
   6464 	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
   6465 		drm_dbg_kms(&dev_priv->drm,
    6466 			    "Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
   6467 			    tmp);
   6468 }
   6469 
   6470 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
   6471 {
   6472 	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
   6473 
   6474 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
   6475 
   6476 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
   6477 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
   6478 		   ILK_ELPIN_409_SELECT);
   6479 
   6480 	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
   6481 	I915_WRITE(_3D_CHICKEN,
   6482 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
   6483 
   6484 	/* WaDisable_RenderCache_OperationalFlush:snb */
   6485 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   6486 
   6487 	/*
    6488 	 * BSpec recommends 8x4 when MSAA is used;
    6489 	 * however, in practice 16x4 seems fastest.
   6490 	 *
   6491 	 * Note that PS/WM thread counts depend on the WIZ hashing
   6492 	 * disable bit, which we don't touch here, but it's good
   6493 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
   6494 	 */
   6495 	I915_WRITE(GEN6_GT_MODE,
   6496 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
   6497 
   6498 	I915_WRITE(CACHE_MODE_0,
   6499 		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
   6500 
   6501 	I915_WRITE(GEN6_UCGCTL1,
   6502 		   I915_READ(GEN6_UCGCTL1) |
   6503 		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
   6504 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
   6505 
   6506 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
   6507 	 * gating disable must be set.  Failure to set it results in
   6508 	 * flickering pixels due to Z write ordering failures after
   6509 	 * some amount of runtime in the Mesa "fire" demo, and Unigine
   6510 	 * Sanctuary and Tropics, and apparently anything else with
   6511 	 * alpha test or pixel discard.
   6512 	 *
   6513 	 * According to the spec, bit 11 (RCCUNIT) must also be set,
    6514 	 * but we didn't debug actual test cases to confirm it.
   6515 	 *
   6516 	 * WaDisableRCCUnitClockGating:snb
   6517 	 * WaDisableRCPBUnitClockGating:snb
   6518 	 */
   6519 	I915_WRITE(GEN6_UCGCTL2,
   6520 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
   6521 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
   6522 
   6523 	/* WaStripsFansDisableFastClipPerformanceFix:snb */
   6524 	I915_WRITE(_3D_CHICKEN3,
   6525 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
   6526 
   6527 	/*
   6528 	 * Bspec says:
   6529 	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
   6530 	 * 3DSTATE_SF number of SF output attributes is more than 16."
   6531 	 */
   6532 	I915_WRITE(_3D_CHICKEN3,
   6533 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
   6534 
   6535 	/*
    6536 	 * According to the spec, the following bits should be
    6537 	 * set in order to enable memory self-refresh and FBC:
    6538 	 *   bits 21 and 22 of 0x42000
    6539 	 *   bits 21 and 22 of 0x42004
    6540 	 *   bits 5 and 7 of 0x42020
    6541 	 *   bit 14 of 0x70180
    6542 	 *   bit 14 of 0x71180
   6543 	 *
   6544 	 * WaFbcAsynchFlipDisableFbcQueue:snb
   6545 	 */
   6546 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
   6547 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
   6548 		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
   6549 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
   6550 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
   6551 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
   6552 	I915_WRITE(ILK_DSPCLK_GATE_D,
   6553 		   I915_READ(ILK_DSPCLK_GATE_D) |
   6554 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
   6555 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
   6556 
   6557 	g4x_disable_trickle_feed(dev_priv);
   6558 
   6559 	cpt_init_clock_gating(dev_priv);
   6560 
   6561 	gen6_check_mch_setup(dev_priv);
   6562 }
   6563 
   6564 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
   6565 {
   6566 	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
   6567 
   6568 	/*
   6569 	 * WaVSThreadDispatchOverride:ivb,vlv
   6570 	 *
   6571 	 * This actually overrides the dispatch
   6572 	 * mode for all thread types.
   6573 	 */
   6574 	reg &= ~GEN7_FF_SCHED_MASK;
   6575 	reg |= GEN7_FF_TS_SCHED_HW;
   6576 	reg |= GEN7_FF_VS_SCHED_HW;
   6577 	reg |= GEN7_FF_DS_SCHED_HW;
   6578 
   6579 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
   6580 }
   6581 
   6582 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
   6583 {
   6584 	/*
   6585 	 * TODO: this bit should only be enabled when really needed, then
   6586 	 * disabled when not needed anymore in order to save power.
   6587 	 */
   6588 	if (HAS_PCH_LPT_LP(dev_priv))
   6589 		I915_WRITE(SOUTH_DSPCLK_GATE_D,
   6590 			   I915_READ(SOUTH_DSPCLK_GATE_D) |
   6591 			   PCH_LP_PARTITION_LEVEL_DISABLE);
   6592 
   6593 	/* WADPOClockGatingDisable:hsw */
   6594 	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
   6595 		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
   6596 		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
   6597 }
   6598 
   6599 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
   6600 {
   6601 	if (HAS_PCH_LPT_LP(dev_priv)) {
   6602 		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
   6603 
   6604 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
   6605 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
   6606 	}
   6607 }
   6608 
   6609 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
   6610 				   int general_prio_credits,
   6611 				   int high_prio_credits)
   6612 {
   6613 	u32 misccpctl;
   6614 	u32 val;
   6615 
   6616 	/* WaTempDisableDOPClkGating:bdw */
   6617 	misccpctl = I915_READ(GEN7_MISCCPCTL);
   6618 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
   6619 
   6620 	val = I915_READ(GEN8_L3SQCREG1);
   6621 	val &= ~L3_PRIO_CREDITS_MASK;
   6622 	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
   6623 	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
   6624 	I915_WRITE(GEN8_L3SQCREG1, val);
   6625 
   6626 	/*
   6627 	 * Wait at least 100 clocks before re-enabling clock gating.
   6628 	 * See the definition of L3SQCREG1 in BSpec.
   6629 	 */
   6630 	POSTING_READ(GEN8_L3SQCREG1);
   6631 	udelay(1);
   6632 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
   6633 }
   6634 
   6635 static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
   6636 {
    6637 	/* This is not a WA. Enable it to reduce Sampler power. */
   6638 	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
   6639 		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
   6640 
   6641 	/* WaEnable32PlaneMode:icl */
   6642 	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
   6643 		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
   6644 
   6645 	/*
   6646 	 * Wa_1408615072:icl,ehl  (vsunit)
   6647 	 * Wa_1407596294:icl,ehl  (hsunit)
   6648 	 */
   6649 	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
   6650 			 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
   6651 
   6652 	/* Wa_1407352427:icl,ehl */
   6653 	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
   6654 			 0, PSDUNIT_CLKGATE_DIS);
   6655 }
   6656 
   6657 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
   6658 {
   6659 	u32 vd_pg_enable = 0;
   6660 	unsigned int i;
   6661 
   6662 	/* Wa_1408615072:tgl */
   6663 	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
   6664 			 0, VSUNIT_CLKGATE_DIS_TGL);
   6665 
   6666 	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
   6667 	for (i = 0; i < I915_MAX_VCS; i++) {
   6668 		if (HAS_ENGINE(dev_priv, _VCS(i)))
   6669 			vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
   6670 					VDN_MFX_POWERGATE_ENABLE(i);
   6671 	}
   6672 
   6673 	I915_WRITE(POWERGATE_ENABLE,
   6674 		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
   6675 }
   6676 
   6677 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
   6678 {
   6679 	if (!HAS_PCH_CNP(dev_priv))
   6680 		return;
   6681 
   6682 	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
   6683 	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
   6684 		   CNP_PWM_CGE_GATING_DISABLE);
   6685 }
   6686 
   6687 static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
   6688 {
    6689 	u32 val;

    6690 	cnp_init_clock_gating(dev_priv);
   6691 
    6692 	/* This is not a WA. Enable it for better image quality. */
   6693 	I915_WRITE(_3D_CHICKEN3,
   6694 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
   6695 
   6696 	/* WaEnableChickenDCPR:cnl */
   6697 	I915_WRITE(GEN8_CHICKEN_DCPR_1,
   6698 		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
   6699 
   6700 	/* WaFbcWakeMemOn:cnl */
   6701 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
   6702 		   DISP_FBC_MEMORY_WAKE);
   6703 
   6704 	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
   6705 	/* ReadHitWriteOnlyDisable:cnl */
   6706 	val |= RCCUNIT_CLKGATE_DIS;
   6707 	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
   6708 	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
   6709 		val |= SARBUNIT_CLKGATE_DIS;
   6710 	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
   6711 
   6712 	/* Wa_2201832410:cnl */
   6713 	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
   6714 	val |= GWUNIT_CLKGATE_DIS;
   6715 	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
   6716 
   6717 	/* WaDisableVFclkgate:cnl */
   6718 	/* WaVFUnitClockGatingDisable:cnl */
   6719 	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
   6720 	val |= VFUNIT_CLKGATE_DIS;
   6721 	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
   6722 }
   6723 
   6724 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
   6725 {
   6726 	cnp_init_clock_gating(dev_priv);
   6727 	gen9_init_clock_gating(dev_priv);
   6728 
   6729 	/* WaFbcNukeOnHostModify:cfl */
   6730 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
   6731 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
   6732 }
   6733 
   6734 static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
   6735 {
   6736 	gen9_init_clock_gating(dev_priv);
   6737 
   6738 	/* WaDisableSDEUnitClockGating:kbl */
   6739 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
   6740 		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
   6741 			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
   6742 
   6743 	/* WaDisableGamClockGating:kbl */
   6744 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
   6745 		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
   6746 			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
   6747 
   6748 	/* WaFbcNukeOnHostModify:kbl */
   6749 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
   6750 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
   6751 }
   6752 
   6753 static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
   6754 {
   6755 	gen9_init_clock_gating(dev_priv);
   6756 
   6757 	/* WAC6entrylatency:skl */
   6758 	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
   6759 		   FBC_LLC_FULLY_OPEN);
   6760 
   6761 	/* WaFbcNukeOnHostModify:skl */
   6762 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
   6763 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
   6764 }
   6765 
   6766 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
   6767 {
   6768 	enum pipe pipe;
   6769 
   6770 	/* WaSwitchSolVfFArbitrationPriority:bdw */
   6771 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
   6772 
   6773 	/* WaPsrDPAMaskVBlankInSRD:bdw */
   6774 	I915_WRITE(CHICKEN_PAR1_1,
   6775 		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
   6776 
   6777 	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
   6778 	for_each_pipe(dev_priv, pipe) {
   6779 		I915_WRITE(CHICKEN_PIPESL_1(pipe),
   6780 			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
   6781 			   BDW_DPRS_MASK_VBLANK_SRD);
   6782 	}
   6783 
   6784 	/* WaVSRefCountFullforceMissDisable:bdw */
   6785 	/* WaDSRefCountFullforceMissDisable:bdw */
   6786 	I915_WRITE(GEN7_FF_THREAD_MODE,
   6787 		   I915_READ(GEN7_FF_THREAD_MODE) &
   6788 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
   6789 
   6790 	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
   6791 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
   6792 
   6793 	/* WaDisableSDEUnitClockGating:bdw */
   6794 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
   6795 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
   6796 
   6797 	/* WaProgramL3SqcReg1Default:bdw */
   6798 	gen8_set_l3sqc_credits(dev_priv, 30, 2);
   6799 
   6800 	/* WaKVMNotificationOnConfigChange:bdw */
   6801 	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
   6802 		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
   6803 
   6804 	lpt_init_clock_gating(dev_priv);
   6805 
   6806 	/* WaDisableDopClockGating:bdw
   6807 	 *
   6808 	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
   6809 	 * clock gating.
   6810 	 */
   6811 	I915_WRITE(GEN6_UCGCTL1,
   6812 		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
   6813 }
   6814 
   6815 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
   6816 {
   6817 	/* L3 caching of data atomics doesn't work -- disable it. */
   6818 	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
   6819 	I915_WRITE(HSW_ROW_CHICKEN3,
   6820 		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
   6821 
   6822 	/* This is required by WaCatErrorRejectionIssue:hsw */
   6823 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
   6824 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
   6825 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
   6826 
   6827 	/* WaVSRefCountFullforceMissDisable:hsw */
   6828 	I915_WRITE(GEN7_FF_THREAD_MODE,
   6829 		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
   6830 
   6831 	/* WaDisable_RenderCache_OperationalFlush:hsw */
   6832 	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   6833 
   6834 	/* enable HiZ Raw Stall Optimization */
   6835 	I915_WRITE(CACHE_MODE_0_GEN7,
   6836 		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
   6837 
   6838 	/* WaDisable4x2SubspanOptimization:hsw */
   6839 	I915_WRITE(CACHE_MODE_1,
   6840 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
   6841 
   6842 	/*
    6843 	 * BSpec recommends 8x4 when MSAA is used;
    6844 	 * however, in practice 16x4 seems fastest.
   6845 	 *
   6846 	 * Note that PS/WM thread counts depend on the WIZ hashing
   6847 	 * disable bit, which we don't touch here, but it's good
   6848 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
   6849 	 */
   6850 	I915_WRITE(GEN7_GT_MODE,
   6851 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
   6852 
   6853 	/* WaSampleCChickenBitEnable:hsw */
   6854 	I915_WRITE(HALF_SLICE_CHICKEN3,
   6855 		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
   6856 
   6857 	/* WaSwitchSolVfFArbitrationPriority:hsw */
   6858 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
   6859 
   6860 	lpt_init_clock_gating(dev_priv);
   6861 }
   6862 
   6863 static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
   6864 {
   6865 	u32 snpcr;
   6866 
   6867 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
   6868 
   6869 	/* WaDisableEarlyCull:ivb */
   6870 	I915_WRITE(_3D_CHICKEN3,
   6871 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
   6872 
   6873 	/* WaDisableBackToBackFlipFix:ivb */
   6874 	I915_WRITE(IVB_CHICKEN3,
   6875 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
   6876 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
   6877 
   6878 	/* WaDisablePSDDualDispatchEnable:ivb */
   6879 	if (IS_IVB_GT1(dev_priv))
   6880 		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
   6881 			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
   6882 
   6883 	/* WaDisable_RenderCache_OperationalFlush:ivb */
   6884 	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   6885 
   6886 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
   6887 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
   6888 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
   6889 
   6890 	/* WaApplyL3ControlAndL3ChickenMode:ivb */
   6891 	I915_WRITE(GEN7_L3CNTLREG1,
   6892 			GEN7_WA_FOR_GEN7_L3_CONTROL);
   6893 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
   6894 		   GEN7_WA_L3_CHICKEN_MODE);
   6895 	if (IS_IVB_GT1(dev_priv))
   6896 		I915_WRITE(GEN7_ROW_CHICKEN2,
   6897 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
   6898 	else {
   6899 		/* must write both registers */
   6900 		I915_WRITE(GEN7_ROW_CHICKEN2,
   6901 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
   6902 		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
   6903 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
   6904 	}
   6905 
   6906 	/* WaForceL3Serialization:ivb */
   6907 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
   6908 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
   6909 
   6910 	/*
   6911 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
   6912 	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
   6913 	 */
   6914 	I915_WRITE(GEN6_UCGCTL2,
   6915 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
   6916 
   6917 	/* This is required by WaCatErrorRejectionIssue:ivb */
   6918 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
   6919 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
   6920 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
   6921 
   6922 	g4x_disable_trickle_feed(dev_priv);
   6923 
   6924 	gen7_setup_fixed_func_scheduler(dev_priv);
   6925 
   6926 	if (0) { /* causes HiZ corruption on ivb:gt1 */
   6927 		/* enable HiZ Raw Stall Optimization */
   6928 		I915_WRITE(CACHE_MODE_0_GEN7,
   6929 			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
   6930 	}
   6931 
   6932 	/* WaDisable4x2SubspanOptimization:ivb */
   6933 	I915_WRITE(CACHE_MODE_1,
   6934 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
   6935 
   6936 	/*
    6937 	 * BSpec recommends 8x4 when MSAA is used;
    6938 	 * however, in practice 16x4 seems fastest.
   6939 	 *
   6940 	 * Note that PS/WM thread counts depend on the WIZ hashing
   6941 	 * disable bit, which we don't touch here, but it's good
   6942 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
   6943 	 */
   6944 	I915_WRITE(GEN7_GT_MODE,
   6945 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
   6946 
   6947 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
   6948 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
   6949 	snpcr |= GEN6_MBC_SNPCR_MED;
   6950 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
   6951 
   6952 	if (!HAS_PCH_NOP(dev_priv))
   6953 		cpt_init_clock_gating(dev_priv);
   6954 
   6955 	gen6_check_mch_setup(dev_priv);
   6956 }
   6957 
   6958 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
   6959 {
   6960 	/* WaDisableEarlyCull:vlv */
   6961 	I915_WRITE(_3D_CHICKEN3,
   6962 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
   6963 
   6964 	/* WaDisableBackToBackFlipFix:vlv */
   6965 	I915_WRITE(IVB_CHICKEN3,
   6966 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
   6967 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
   6968 
   6969 	/* WaPsdDispatchEnable:vlv */
   6970 	/* WaDisablePSDDualDispatchEnable:vlv */
   6971 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
   6972 		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
   6973 				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
   6974 
   6975 	/* WaDisable_RenderCache_OperationalFlush:vlv */
   6976 	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   6977 
   6978 	/* WaForceL3Serialization:vlv */
   6979 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
   6980 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
   6981 
   6982 	/* WaDisableDopClockGating:vlv */
   6983 	I915_WRITE(GEN7_ROW_CHICKEN2,
   6984 		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
   6985 
   6986 	/* This is required by WaCatErrorRejectionIssue:vlv */
   6987 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
   6988 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
   6989 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
   6990 
   6991 	gen7_setup_fixed_func_scheduler(dev_priv);
   6992 
   6993 	/*
   6994 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
   6995 	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
   6996 	 */
   6997 	I915_WRITE(GEN6_UCGCTL2,
   6998 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
   6999 
    7000 	/* WaDisableL3Bank2xClockGate:vlv
    7001 	 * Disabling L3 clock gating - MMIO 940c[25] = 1.
    7002 	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING. */
   7003 	I915_WRITE(GEN7_UCGCTL4,
   7004 		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
   7005 
   7006 	/*
   7007 	 * BSpec says this must be set, even though
   7008 	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
   7009 	 */
   7010 	I915_WRITE(CACHE_MODE_1,
   7011 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
   7012 
   7013 	/*
    7014 	 * BSpec recommends 8x4 when MSAA is used;
    7015 	 * however, in practice 16x4 seems fastest.
   7016 	 *
   7017 	 * Note that PS/WM thread counts depend on the WIZ hashing
   7018 	 * disable bit, which we don't touch here, but it's good
   7019 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
   7020 	 */
   7021 	I915_WRITE(GEN7_GT_MODE,
   7022 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
   7023 
   7024 	/*
   7025 	 * WaIncreaseL3CreditsForVLVB0:vlv
   7026 	 * This is the hardware default actually.
   7027 	 */
   7028 	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
   7029 
   7030 	/*
   7031 	 * WaDisableVLVClockGating_VBIIssue:vlv
    7032 	 * Disable clock gating on the GCFG unit to prevent a delay
   7033 	 * in the reporting of vblank events.
   7034 	 */
   7035 	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
   7036 }
   7037 
   7038 static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
   7039 {
   7040 	/* WaVSRefCountFullforceMissDisable:chv */
   7041 	/* WaDSRefCountFullforceMissDisable:chv */
   7042 	I915_WRITE(GEN7_FF_THREAD_MODE,
   7043 		   I915_READ(GEN7_FF_THREAD_MODE) &
   7044 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
   7045 
   7046 	/* WaDisableSemaphoreAndSyncFlipWait:chv */
   7047 	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
   7048 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
   7049 
   7050 	/* WaDisableCSUnitClockGating:chv */
   7051 	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
   7052 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
   7053 
   7054 	/* WaDisableSDEUnitClockGating:chv */
   7055 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
   7056 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
   7057 
   7058 	/*
   7059 	 * WaProgramL3SqcReg1Default:chv
   7060 	 * See gfxspecs/Related Documents/Performance Guide/
   7061 	 * LSQC Setting Recommendations.
   7062 	 */
   7063 	gen8_set_l3sqc_credits(dev_priv, 38, 2);
   7064 }
   7065 
   7066 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
   7067 {
   7068 	u32 dspclk_gate;
   7069 
   7070 	I915_WRITE(RENCLK_GATE_D1, 0);
   7071 	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
   7072 		   GS_UNIT_CLOCK_GATE_DISABLE |
   7073 		   CL_UNIT_CLOCK_GATE_DISABLE);
   7074 	I915_WRITE(RAMCLK_GATE_D, 0);
   7075 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
   7076 		OVRUNIT_CLOCK_GATE_DISABLE |
   7077 		OVCUNIT_CLOCK_GATE_DISABLE;
   7078 	if (IS_GM45(dev_priv))
   7079 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
   7080 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
   7081 
   7082 	/* WaDisableRenderCachePipelinedFlush */
   7083 	I915_WRITE(CACHE_MODE_0,
   7084 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
   7085 
   7086 	/* WaDisable_RenderCache_OperationalFlush:g4x */
   7087 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   7088 
   7089 	g4x_disable_trickle_feed(dev_priv);
   7090 }
   7091 
   7092 static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
   7093 {
   7094 	struct intel_uncore *uncore = &dev_priv->uncore;
   7095 
   7096 	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
   7097 	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
   7098 	intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
   7099 	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
   7100 	intel_uncore_write16(uncore, DEUC, 0);
   7101 	intel_uncore_write(uncore,
   7102 			   MI_ARB_STATE,
   7103 			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
   7104 
   7105 	/* WaDisable_RenderCache_OperationalFlush:gen4 */
   7106 	intel_uncore_write(uncore,
   7107 			   CACHE_MODE_0,
   7108 			   _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   7109 }
   7110 
   7111 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
   7112 {
   7113 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
   7114 		   I965_RCC_CLOCK_GATE_DISABLE |
   7115 		   I965_RCPB_CLOCK_GATE_DISABLE |
   7116 		   I965_ISC_CLOCK_GATE_DISABLE |
   7117 		   I965_FBC_CLOCK_GATE_DISABLE);
   7118 	I915_WRITE(RENCLK_GATE_D2, 0);
   7119 	I915_WRITE(MI_ARB_STATE,
   7120 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
   7121 
   7122 	/* WaDisable_RenderCache_OperationalFlush:gen4 */
   7123 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
   7124 }
   7125 
   7126 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
   7127 {
   7128 	u32 dstate = I915_READ(D_STATE);
   7129 
   7130 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
   7131 		DSTATE_DOT_CLOCK_GATING;
   7132 	I915_WRITE(D_STATE, dstate);
   7133 
   7134 	if (IS_PINEVIEW(dev_priv))
   7135 		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
   7136 
   7137 	/* IIR "flip pending" means done if this bit is set */
   7138 	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
   7139 
   7140 	/* interrupts should cause a wake up from C3 */
   7141 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
   7142 
   7143 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
   7144 	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
   7145 
   7146 	I915_WRITE(MI_ARB_STATE,
   7147 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
   7148 }
   7149 
   7150 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
   7151 {
   7152 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
   7153 
   7154 	/* interrupts should cause a wake up from C3 */
   7155 	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
   7156 		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
   7157 
   7158 	I915_WRITE(MEM_MODE,
   7159 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
   7160 }
   7161 
   7162 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
   7163 {
   7164 	I915_WRITE(MEM_MODE,
   7165 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
   7166 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
   7167 }
   7168 
   7169 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
   7170 {
   7171 	dev_priv->display.init_clock_gating(dev_priv);
   7172 }
   7173 
   7174 void intel_suspend_hw(struct drm_i915_private *dev_priv)
   7175 {
   7176 	if (HAS_PCH_LPT(dev_priv))
   7177 		lpt_suspend_hw(dev_priv);
   7178 }
   7179 
   7180 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
   7181 {
   7182 	drm_dbg_kms(&dev_priv->drm,
   7183 		    "No clock gating settings or workarounds applied.\n");
   7184 }
   7185 
   7186 /**
   7187  * intel_init_clock_gating_hooks - setup the clock gating hooks
   7188  * @dev_priv: device private
   7189  *
    7190  * Set up the hooks that configure which clocks of a given platform can be
   7191  * gated and also apply various GT and display specific workarounds for these
   7192  * platforms. Note that some GT specific workarounds are applied separately
   7193  * when GPU contexts or batchbuffers start their execution.
   7194  */
   7195 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
   7196 {
   7197 	if (IS_GEN(dev_priv, 12))
   7198 		dev_priv->display.init_clock_gating = tgl_init_clock_gating;
   7199 	else if (IS_GEN(dev_priv, 11))
   7200 		dev_priv->display.init_clock_gating = icl_init_clock_gating;
   7201 	else if (IS_CANNONLAKE(dev_priv))
   7202 		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
   7203 	else if (IS_COFFEELAKE(dev_priv))
   7204 		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
   7205 	else if (IS_SKYLAKE(dev_priv))
   7206 		dev_priv->display.init_clock_gating = skl_init_clock_gating;
   7207 	else if (IS_KABYLAKE(dev_priv))
   7208 		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
   7209 	else if (IS_BROXTON(dev_priv))
   7210 		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
   7211 	else if (IS_GEMINILAKE(dev_priv))
   7212 		dev_priv->display.init_clock_gating = glk_init_clock_gating;
   7213 	else if (IS_BROADWELL(dev_priv))
   7214 		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
   7215 	else if (IS_CHERRYVIEW(dev_priv))
   7216 		dev_priv->display.init_clock_gating = chv_init_clock_gating;
   7217 	else if (IS_HASWELL(dev_priv))
   7218 		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
   7219 	else if (IS_IVYBRIDGE(dev_priv))
   7220 		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
   7221 	else if (IS_VALLEYVIEW(dev_priv))
   7222 		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
   7223 	else if (IS_GEN(dev_priv, 6))
   7224 		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
   7225 	else if (IS_GEN(dev_priv, 5))
   7226 		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
   7227 	else if (IS_G4X(dev_priv))
   7228 		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
   7229 	else if (IS_I965GM(dev_priv))
   7230 		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
   7231 	else if (IS_I965G(dev_priv))
   7232 		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
   7233 	else if (IS_GEN(dev_priv, 3))
   7234 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
   7235 	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
   7236 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
   7237 	else if (IS_GEN(dev_priv, 2))
   7238 		dev_priv->display.init_clock_gating = i830_init_clock_gating;
   7239 	else {
   7240 		MISSING_CASE(INTEL_DEVID(dev_priv));
   7241 		dev_priv->display.init_clock_gating = nop_init_clock_gating;
   7242 	}
   7243 }
   7244 
    7245 /* Set up chip-specific power management-related functions */
   7246 void intel_init_pm(struct drm_i915_private *dev_priv)
   7247 {
   7248 	/* For cxsr */
   7249 	if (IS_PINEVIEW(dev_priv))
   7250 		pnv_get_mem_freq(dev_priv);
   7251 	else if (IS_GEN(dev_priv, 5))
   7252 		ilk_get_mem_freq(dev_priv);
   7253 
   7254 	if (intel_has_sagv(dev_priv))
   7255 		skl_setup_sagv_block_time(dev_priv);
   7256 
   7257 	/* For FIFO watermark updates */
   7258 	if (INTEL_GEN(dev_priv) >= 9) {
   7259 		skl_setup_wm_latency(dev_priv);
   7260 		dev_priv->display.initial_watermarks = skl_initial_wm;
   7261 		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
   7262 		dev_priv->display.compute_global_watermarks = skl_compute_wm;
   7263 	} else if (HAS_PCH_SPLIT(dev_priv)) {
   7264 		ilk_setup_wm_latency(dev_priv);
   7265 
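         		/*
         		 * On gen5 the level 0 latency is hardcoded (see
         		 * intel_read_wm_latency()), so validate level 1;
         		 * later platforms read level 0 from hardware.
         		 */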
   7266 		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
   7267 		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
   7268 		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
   7269 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
   7270 			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
   7271 			dev_priv->display.compute_intermediate_wm =
   7272 				ilk_compute_intermediate_wm;
   7273 			dev_priv->display.initial_watermarks =
   7274 				ilk_initial_watermarks;
   7275 			dev_priv->display.optimize_watermarks =
   7276 				ilk_optimize_watermarks;
   7277 		} else {
   7278 			drm_dbg_kms(&dev_priv->drm,
   7279 				    "Failed to read display plane latency. "
    7280 				    "Disabling CxSR\n");
   7281 		}
   7282 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
   7283 		vlv_setup_wm_latency(dev_priv);
   7284 		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
   7285 		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
   7286 		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
   7287 		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
   7288 		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
   7289 	} else if (IS_G4X(dev_priv)) {
   7290 		g4x_setup_wm_latency(dev_priv);
   7291 		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
   7292 		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
   7293 		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
   7294 		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
   7295 	} else if (IS_PINEVIEW(dev_priv)) {
   7296 		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
   7297 					    dev_priv->is_ddr3,
   7298 					    dev_priv->fsb_freq,
   7299 					    dev_priv->mem_freq)) {
   7300 			drm_info(&dev_priv->drm,
   7301 				 "failed to find known CxSR latency "
   7302 				 "(found ddr%s fsb freq %d, mem freq %d), "
   7303 				 "disabling CxSR\n",
   7304 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
   7305 				 dev_priv->fsb_freq, dev_priv->mem_freq);
   7306 			/* Disable CxSR and never update its watermark again */
   7307 			intel_set_memory_cxsr(dev_priv, false);
   7308 			dev_priv->display.update_wm = NULL;
   7309 		} else
   7310 			dev_priv->display.update_wm = pnv_update_wm;
   7311 	} else if (IS_GEN(dev_priv, 4)) {
   7312 		dev_priv->display.update_wm = i965_update_wm;
   7313 	} else if (IS_GEN(dev_priv, 3)) {
   7314 		dev_priv->display.update_wm = i9xx_update_wm;
   7315 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
   7316 	} else if (IS_GEN(dev_priv, 2)) {
   7317 		if (INTEL_NUM_PIPES(dev_priv) == 1) {
   7318 			dev_priv->display.update_wm = i845_update_wm;
   7319 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
   7320 		} else {
   7321 			dev_priv->display.update_wm = i9xx_update_wm;
   7322 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
   7323 		}
   7324 	} else {
   7325 		drm_err(&dev_priv->drm,
   7326 			"unexpected fall-through in %s\n", __func__);
   7327 	}
   7328 }
   7329 
   7330 void intel_pm_setup(struct drm_i915_private *dev_priv)
   7331 {
   7332 	dev_priv->runtime_pm.suspended = false;
   7333 	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
   7334 }
   7335