
Lines Matching defs:wm

444 	mutex_lock(&dev_priv->wm.wm_mutex);
447 dev_priv->wm.vlv.cxsr = enable;
449 dev_priv->wm.g4x.cxsr = enable;
450 mutex_unlock(&dev_priv->wm.wm_mutex);
478 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
741 * @wm: chip FIFO params
758 const struct intel_watermark_params *wm,
772 entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
773 wm->guard_size;
780 if (wm_size > wm->max_wm)
781 wm_size = wm->max_wm;
783 wm_size = wm->default_wm;
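
The intel_calculate_wm() fragments above implement the classic single-FIFO scheme: figure out how many bytes the display fetches during one memory latency period, round that up to whole FIFO cachelines, add a guard band, and whatever FIFO space remains becomes the watermark (clamped to the chip's maximum, or falling back to a default when nothing is left). A standalone sketch of that arithmetic, using an invented parameter struct rather than the driver's real intel_watermark_params tables:

    struct wm_params {
            unsigned int fifo_size;       /* FIFO depth, in cachelines    */
            unsigned int cacheline_size;  /* bytes per cacheline          */
            unsigned int guard_size;      /* safety margin, in cachelines */
            unsigned int max_wm;
            unsigned int default_wm;
    };

    static unsigned int calc_wm(const struct wm_params *p,
                                unsigned int pixel_rate_khz,
                                unsigned int cpp, unsigned int latency_ns)
    {
            unsigned long long bytes;
            unsigned int entries;
            int wm_size;

            /* Bytes the display consumes while one memory request is in flight. */
            bytes = (unsigned long long)pixel_rate_khz * cpp * latency_ns;
            bytes /= 1000000;                               /* kHz * ns -> bytes */

            /* Round up to whole cachelines and add the guard band. */
            entries = (unsigned int)((bytes + p->cacheline_size - 1) /
                                     p->cacheline_size) + p->guard_size;

            /* Whatever is left of the FIFO may drain before the refill lands. */
            wm_size = (int)p->fifo_size - (int)entries;
            if (wm_size > (int)p->max_wm)
                    wm_size = p->max_wm;
            if (wm_size <= 0)
                    wm_size = p->default_wm;

            return wm_size;
    }
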
810 return dev_priv->wm.max_level + 1;
876 unsigned int wm;
899 wm = intel_calculate_wm(clock, &pnv_display_wm,
904 reg |= FW_WM(wm, SR);
909 wm = intel_calculate_wm(clock, &pnv_cursor_wm,
914 reg |= FW_WM(wm, CURSOR_SR);
918 wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
923 reg |= FW_WM(wm, HPLL_SR);
927 wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
932 reg |= FW_WM(wm, HPLL_CURSOR);
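
FW_WM() in the pnv_update_wm() fragments above drops each computed watermark into the right bitfield of a DSPFW register without disturbing the neighbouring fields. The general shift-and-mask pattern it relies on looks like this (field name and bit position here are hypothetical, not the driver's DSPFW_*_SHIFT/_MASK definitions):

    /* Hypothetical 9-bit self-refresh field occupying bits 31:23. */
    #define MY_SR_SHIFT     23
    #define MY_SR_MASK      (0x1ffu << MY_SR_SHIFT)

    #define PACK_WM(value, field) \
            (((unsigned int)(value) << MY_##field##_SHIFT) & MY_##field##_MASK)

    /* reg |= PACK_WM(wm, SR) then updates only that field; the mask also
     * clips a value that is too wide for the field. */
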
948 * The WM is adjusted upwards by the difference between the FIFO size
960 const struct g4x_wm_values *wm)
965 trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
968 FW_WM(wm->sr.plane, SR) |
969 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
970 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
971 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
973 (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
974 FW_WM(wm->sr.fbc, FBC_SR) |
975 FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
976 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
977 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
978 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
980 (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
981 FW_WM(wm->sr.cursor, CURSOR_SR) |
982 FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
983 FW_WM(wm->hpll.plane, HPLL_SR));
992 const struct vlv_wm_values *wm)
997 trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
1000 (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
1001 (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
1002 (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
1003 (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
1018 FW_WM(wm->sr.plane, SR) |
1019 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
1020 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
1021 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
1023 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
1024 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
1025 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
1027 FW_WM(wm->sr.cursor, CURSOR_SR));
1031 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1032 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1034 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
1035 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
1037 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
1038 FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
1040 FW_WM(wm->sr.plane >> 9, SR_HI) |
1041 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
1042 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
1043 FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
1044 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1045 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1046 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1047 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1048 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1049 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1052 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1053 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1055 FW_WM(wm->sr.plane >> 9, SR_HI) |
1056 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1057 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1058 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1059 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1060 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1061 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1072 dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
1073 dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
1074 dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
1076 dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
1129 unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
1130 unsigned int clock, htotal, cpp, width, wm;
1161 wm = intel_wm_method2(clock, htotal, width, cpp, latency);
1164 wm = intel_wm_method1(clock, cpp, latency);
1171 wm = min(small, large);
1174 wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
1177 wm = DIV_ROUND_UP(wm, 64) + 2;
1179 return min_t(unsigned int, wm, USHRT_MAX);
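
g4x_compute_wm() above produces two estimates and keeps the smaller: "method 1" sizes the fetch purely from the pixel rate and the memory latency, while "method 2" works per scanline, counting how many lines start inside the latency window and multiplying by the bytes one line of the plane needs. A hedged sketch of both calculations (unit handling and overflow care simplified compared with the driver's intel_wm_method1/2):

    /* pixel_rate in kHz, latency in 0.1 us units, results in bytes. */
    static unsigned int wm_method1(unsigned int pixel_rate, unsigned int cpp,
                                   unsigned int latency)
    {
            /* Bytes fetched while one memory request is outstanding. */
            return ((unsigned long long)pixel_rate * cpp * latency) / 10000;
    }

    static unsigned int wm_method2(unsigned int pixel_rate, unsigned int htotal,
                                   unsigned int width, unsigned int cpp,
                                   unsigned int latency)
    {
            /* Scanlines that begin during the latency window ... */
            unsigned int lines = (latency * pixel_rate) / (htotal * 10000) + 1;

            /* ... times the bytes one line of this plane needs. */
            return lines * width * cpp;
    }

    /* The smaller of the two bounds what the FIFO really has to absorb,
     * which is why the fragment above takes min(small, large). */
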
1189 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1208 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1239 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1240 int wm, max_wm;
1242 wm = g4x_compute_wm(crtc_state, plane_state, level);
1245 if (wm > max_wm)
1248 dirty |= raw->plane[plane_id] != wm;
1249 raw->plane[plane_id] = wm;
1255 wm = ilk_compute_fbc_wm(crtc_state, plane_state,
1260 * FBC wm is not mandatory as we
1263 if (wm > max_wm)
1264 wm = USHRT_MAX;
1266 dirty |= raw->fbc != wm;
1267 raw->fbc = wm;
1281 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
1282 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
1283 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
1288 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
1289 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
1298 const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1308 if (level > dev_priv->wm.max_level)
1324 wm_state->wm.plane[plane_id] = USHRT_MAX;
1347 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1376 raw = &crtc_state->wm.g4x.raw[level];
1378 wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1385 raw = &crtc_state->wm.g4x.raw[level];
1397 raw = &crtc_state->wm.g4x.raw[level];
1434 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1435 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1440 const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1458 intermediate->wm.plane[plane_id] =
1459 max(optimal->wm.plane[plane_id],
1460 active->wm.plane[plane_id]);
1462 WARN_ON(intermediate->wm.plane[plane_id] >
1498 * If our intermediate WM are identical to the final WM, then we can
1502 new_crtc_state->wm.need_postvbl_update = true;
1508 struct g4x_wm_values *wm)
1513 wm->cxsr = true;
1514 wm->hpll_en = true;
1515 wm->fbc_en = true;
1518 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1524 wm->cxsr = false;
1526 wm->hpll_en = false;
1528 wm->fbc_en = false;
1534 wm->cxsr = false;
1535 wm->hpll_en = false;
1536 wm->fbc_en = false;
1540 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1543 wm->pipe[pipe] = wm_state->wm;
1544 if (crtc->active && wm->cxsr)
1545 wm->sr = wm_state->sr;
1546 if (crtc->active && wm->hpll_en)
1547 wm->hpll = wm_state->hpll;
1553 struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1579 mutex_lock(&dev_priv->wm.wm_mutex);
1580 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1582 mutex_unlock(&dev_priv->wm.wm_mutex);
1592 if (!crtc_state->wm.need_postvbl_update)
1595 mutex_lock(&dev_priv->wm.wm_mutex);
1596 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1598 mutex_unlock(&dev_priv->wm.wm_mutex);
1620 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1622 dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1625 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1626 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1628 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1640 unsigned int clock, htotal, cpp, width, wm;
1642 if (dev_priv->wm.pri_latency[level] == 0)
1660 wm = 63;
1662 wm = vlv_wm_method2(clock, htotal, width, cpp,
1663 dev_priv->wm.pri_latency[level] * 10);
1666 return min_t(unsigned int, wm, USHRT_MAX);
1679 &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1680 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1767 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1774 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1776 if (wm > fifo_size)
1779 return fifo_size - wm;
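
vlv_invert_wm_value() exists because the register wants the value counted from the other end of the plane's FIFO share, so the computed watermark is subtracted from the FIFO size; a watermark larger than the share cannot be expressed at all and that level is treated as unusable. A worked example with invented numbers:

    unsigned short fifo_share = 200;  /* FIFO entries handed to this plane */
    unsigned short wm_value   = 36;   /* computed watermark                */
    unsigned short programmed = fifo_share - wm_value;  /* 164 hits the register */
    /* Had wm_value exceeded fifo_share, the inverted value could not be
     * represented and the level would be dropped instead. */
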
1794 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1819 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1820 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1823 if (wm > max_wm)
1826 dirty |= raw->plane[plane_id] != wm;
1827 raw->plane[plane_id] = wm;
1838 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1839 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1840 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1849 &crtc_state->wm.vlv.raw[level];
1851 &crtc_state->wm.vlv.fifo_state;
1870 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1872 &crtc_state->wm.vlv.fifo_state;
1911 &old_crtc_state->wm.vlv.fifo_state;
1933 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1940 wm_state->wm[level].plane[plane_id] =
1979 &crtc_state->wm.vlv.fifo_state;
2072 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2073 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2078 const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2096 intermediate->wm[level].plane[plane_id] =
2097 min(optimal->wm[level].plane[plane_id],
2098 active->wm[level].plane[plane_id]);
2111 * If our intermediate WM are identical to the final WM, then we can
2115 new_crtc_state->wm.need_postvbl_update = true;
2121 struct vlv_wm_values *wm)
2126 wm->level = dev_priv->wm.max_level;
2127 wm->cxsr = true;
2130 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2136 wm->cxsr = false;
2139 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2143 wm->cxsr = false;
2146 wm->level = VLV_WM_LEVEL_PM2;
2149 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2152 wm->pipe[pipe] = wm_state->wm[wm->level];
2153 if (crtc->active && wm->cxsr)
2154 wm->sr = wm_state->sr[wm->level];
2156 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2157 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2158 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2159 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2165 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2203 mutex_lock(&dev_priv->wm.wm_mutex);
2204 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2206 mutex_unlock(&dev_priv->wm.wm_mutex);
2216 if (!crtc_state->wm.need_postvbl_update)
2219 mutex_lock(&dev_priv->wm.wm_mutex);
2220 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2222 mutex_unlock(&dev_priv->wm.wm_mutex);
2256 "self-refresh entries: %d, wm: %d\n",
2769 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2772 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2775 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2796 u16 pri_latency = dev_priv->wm.pri_latency[level];
2797 u16 spr_latency = dev_priv->wm.spr_latency[level];
2798 u16 cur_latency = dev_priv->wm.cur_latency[level];
2838 /* The WMs are computed based on how long it takes to fill a single
2851 u16 wm[8])
2872 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2873 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2875 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2877 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2891 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2892 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2894 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2896 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2905 if (wm[level] == 0) {
2907 wm[i] = 0;
2919 if (wm[0] == 0) {
2920 wm[0] += 2;
2922 if (wm[level] == 0)
2924 wm[level] += 2;
2935 wm[0] += 1;
2940 wm[0] = (sskpd >> 56) & 0xFF;
2941 if (wm[0] == 0)
2942 wm[0] = sskpd & 0xF;
2943 wm[1] = (sskpd >> 4) & 0xFF;
2944 wm[2] = (sskpd >> 12) & 0xFF;
2945 wm[3] = (sskpd >> 20) & 0x1FF;
2946 wm[4] = (sskpd >> 32) & 0x1FF;
2950 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2951 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2952 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2953 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2958 wm[0] = 7;
2959 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2960 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2967 u16 wm[5])
2971 wm[0] = 13;
2975 u16 wm[5])
2979 wm[0] = 13;
2984 /* how many WM levels are we expecting */
2997 const u16 wm[8])
3002 unsigned int latency = wm[level];
3006 "%s WM%d latency not provided\n",
3021 "%s WM%d latency %u (%u.%u usec)\n", name, level,
3022 wm[level], latency / 10, latency % 10);
3027 u16 wm[5], u16 min)
3031 if (wm[0] >= min)
3034 wm[0] = max(wm[0], min);
3036 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3046 * The BIOS provided WM memory latency values are often
3049 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
3050 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
3051 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3057 "WM latency values increased to avoid potential underruns\n");
3058 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3059 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3060 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
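
The DIV_ROUND_UP(min, 5) above is a unit conversion: the WM0 latency entry is kept in 0.1 us steps while the WM1+ entries use 0.5 us steps, so the 1.2 us floor applied by this quirk is 12 for WM0 but rounds up to 3 for the higher levels. A self-contained sketch of that bump, assuming those units:

    /* min_tenths_us is the floor in 0.1 us units (12 == 1.2 us above). */
    static int bump_latencies(unsigned short wm[], int nlevels,
                              unsigned short min_tenths_us)
    {
            int level;

            if (wm[0] >= min_tenths_us)
                    return 0;                       /* already high enough */

            wm[0] = min_tenths_us;                  /* WM0: 0.1 us steps   */
            for (level = 1; level < nlevels; level++) {
                    /* WM1+: the same floor expressed in 0.5 us steps. */
                    unsigned short floor = (min_tenths_us + 4) / 5;

                    if (wm[level] < floor)
                            wm[level] = floor;
            }
            return 1;                               /* table was changed   */
    }
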
3076 if (dev_priv->wm.pri_latency[3] == 0 &&
3077 dev_priv->wm.spr_latency[3] == 0 &&
3078 dev_priv->wm.cur_latency[3] == 0)
3081 dev_priv->wm.pri_latency[3] = 0;
3082 dev_priv->wm.spr_latency[3] = 0;
3083 dev_priv->wm.cur_latency[3] = 0;
3087 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3088 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3089 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3094 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3096 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3097 sizeof(dev_priv->wm.pri_latency));
3098 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3099 sizeof(dev_priv->wm.pri_latency));
3101 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3102 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3104 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3105 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3106 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3116 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3117 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3135 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3157 pipe_wm = &crtc_state->wm.ilk.optimal;
3186 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3188 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3199 struct intel_wm_level *wm = &pipe_wm->wm[level];
3202 pristate, sprstate, curstate, wm);
3209 if (!ilk_validate_wm_level(level, &max, wm)) {
3210 memset(wm, 0, sizeof(*wm));
3227 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3232 const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3240 *a = newstate->wm.ilk.optimal;
3250 struct intel_wm_level *a_wm = &a->wm[level];
3251 const struct intel_wm_level *b_wm = &b->wm[level];
3270 * If our intermediate WM are identical to the final WM, then we can
3273 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3274 newstate->wm.need_postvbl_update = true;
3291 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3292 const struct intel_wm_level *wm = &active->wm[level];
3302 if (!wm->enable)
3305 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3306 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3307 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3308 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3328 /* ILK: FBC WM must be disabled always */
3333 struct intel_wm_level *wm = &merged->wm[level];
3335 ilk_merge_wm_level(dev_priv, level, wm);
3338 wm->enable = false;
3339 else if (!ilk_validate_wm_level(level, max, wm))
3345 * FBC WMs instead of disabling a WM level.
3347 if (wm->fbc_val > max->fbc) {
3348 if (wm->enable)
3350 wm->fbc_val = 0;
3354 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3363 struct intel_wm_level *wm = &merged->wm[level];
3365 wm->enable = false;
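
The merge above folds every pipe's watermarks into one set of values for the shared LP registers: a level survives only if every contributing pipe enables it, and each field has to cover the worst (largest) per-pipe requirement. A minimal sketch of that rule, using a reduced level struct rather than the driver's intel_wm_level:

    #include <stdbool.h>

    struct lvl { bool enable; unsigned int pri, spr, cur, fbc; };

    #define MAXV(a, b)      ((a) > (b) ? (a) : (b))

    /* Disabled for all pipes if disabled for any; fields take the maximum. */
    static struct lvl merge_level(const struct lvl *pipes, int npipes)
    {
            struct lvl out = { .enable = true };
            int i;

            for (i = 0; i < npipes; i++) {
                    if (!pipes[i].enable)
                            out.enable = false;
                    out.pri = MAXV(out.pri, pipes[i].pri);
                    out.spr = MAXV(out.spr, pipes[i].spr);
                    out.cur = MAXV(out.cur, pipes[i].cur);
                    out.fbc = MAXV(out.fbc, pipes[i].fbc);
            }
            return out;
    }
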
3373 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3383 return dev_priv->wm.pri_latency[level];
3403 r = &merged->wm[level];
3439 &intel_crtc->wm.active.ilk.wm[0];
3444 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3464 if (r1->wm[level].enable)
3466 if (r2->wm[level].enable)
3545 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3579 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3648 dev_priv->wm.hw = *results;
3845 struct skl_plane_wm *wm =
3846 &crtc_state->wm.skl.optimal.planes[plane->id];
3849 if (!wm->wm[0].plane_en)
3852 /* Find the highest enabled wm level for this plane */
3854 !wm->wm[level].plane_en; --level)
3857 latency = dev_priv->wm.skl_latency[level];
3865 * If any of the planes on this pipe don't enable wm levels that
3957 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
4005 struct skl_wm_level wm = {};
4017 skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
4018 if (wm.min_ddb_alloc == U16_MAX)
4021 min_ddb_alloc = wm.min_ddb_alloc;
4290 struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
4303 memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
4304 memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
4334 crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4336 crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4348 const struct skl_plane_wm *wm =
4349 &crtc_state->wm.skl.optimal.planes[plane_id];
4352 if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
4353 WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
4360 blocks += wm->wm[level].min_ddb_alloc;
4361 blocks += wm->uv_wm[level].min_ddb_alloc;
4384 const struct skl_plane_wm *wm =
4385 &crtc_state->wm.skl.optimal.planes[plane_id];
4403 total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
4414 uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
4424 &crtc_state->wm.skl.plane_ddb_y[plane_id];
4426 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
4456 struct skl_plane_wm *wm =
4457 &crtc_state->wm.skl.optimal.planes[plane_id];
4471 if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
4472 wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
4473 memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
4480 level == 1 && wm->wm[0].plane_en) {
4481 wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
4482 wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
4483 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
4493 struct skl_plane_wm *wm =
4494 &crtc_state->wm.skl.optimal.planes[plane_id];
4496 if (wm->trans_wm.plane_res_b >= total[plane_id])
4497 memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
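
The allocation loop above works from the deepest watermark level downwards: it sums every plane's minimum DDB requirement (Y and UV surfaces both), settles on the first level the pipe's DDB slice can satisfy, shares the leftover blocks out among the planes, and finally zeroes any deeper level or transition watermark that no longer fits its plane's allocation. A compact sketch of the level-selection step, with hypothetical fixed-size arrays standing in for the driver's per-plane state:

    #define NLEVELS 8
    #define NPLANES 4

    /* Return the deepest WM level whose summed per-plane minimum DDB
     * allocations fit into the pipe's slice, or -1 if not even level 0 fits. */
    static int pick_ddb_level(const unsigned int min_ddb[NLEVELS][NPLANES],
                              unsigned int alloc_size)
    {
            int level, plane;

            for (level = NLEVELS - 1; level >= 0; level--) {
                    unsigned int blocks = 0;

                    for (plane = 0; plane < NPLANES; plane++)
                            blocks += min_ddb[level][plane];

                    if (blocks <= alloc_size)
                            return level;
            }
            return -1;      /* the configuration exceeds the DDB */
    }
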
4719 u32 latency = dev_priv->wm.skl_latency[level];
4876 struct skl_plane_wm *wm)
4884 /* Transition WM are not recommended by HW team for GEN9 */
4888 /* Transition WM don't make any sense if ipc is disabled */
4908 wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
4929 wm->trans_wm.plane_res_b = res_blocks + 1;
4930 wm->trans_wm.plane_en = true;
4937 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4946 skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
4947 skl_compute_transition_wm(crtc_state, &wm_params, wm);
4956 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4960 wm->is_planar = true;
4968 skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
5039 struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5099 const struct skl_plane_wm *wm =
5100 &crtc_state->wm.skl.optimal.planes[plane_id];
5102 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5104 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5108 &wm->wm[level]);
5111 &wm->trans_wm);
5119 if (wm->is_planar)
5135 const struct skl_plane_wm *wm =
5136 &crtc_state->wm.skl.optimal.planes[plane_id];
5138 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5142 &wm->wm[level]);
5144 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5165 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
5224 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5225 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
5226 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
5227 &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
5250 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
5289 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
5290 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
5296 old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
5297 new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
5323 enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
5324 enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
5325 enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
5326 enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
5328 enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
5329 enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
5330 enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
5331 enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
5338 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
5339 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
5340 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
5341 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
5342 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
5343 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
5344 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
5345 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
5348 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
5349 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
5350 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
5351 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
5352 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
5353 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
5354 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
5355 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
5362 old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
5363 old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
5364 old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
5365 old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
5367 new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
5368 new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
5369 new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
5370 new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
5377 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
5378 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
5379 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
5380 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
5382 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
5383 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
5384 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
5385 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
5419 if (dev_priv->wm.distrust_bios_wm) {
5498 * Force a full wm update for every plane on modeset.
5499 * Required because the reset value of the wm registers
5507 &old_crtc_state->wm.skl.optimal.planes[plane_id],
5508 &new_crtc_state->wm.skl.optimal.planes[plane_id]))
5538 * Calculate WMs for all pipes that are part of this transaction.
5554 &old_crtc_state->wm.skl.optimal,
5555 &new_crtc_state->wm.skl.optimal))
5574 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5594 mutex_lock(&dev_priv->wm.wm_mutex);
5599 mutex_unlock(&dev_priv->wm.wm_mutex);
5609 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5611 if (!wm->pipe_enabled)
5614 config->sprites_enabled |= wm->sprites_enabled;
5615 config->sprites_scaled |= wm->sprites_scaled;
5659 mutex_lock(&dev_priv->wm.wm_mutex);
5660 crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
5662 mutex_unlock(&dev_priv->wm.wm_mutex);
5672 if (!crtc_state->wm.need_postvbl_update)
5675 mutex_lock(&dev_priv->wm.wm_mutex);
5676 crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
5678 mutex_unlock(&dev_priv->wm.wm_mutex);
5703 struct skl_plane_wm *wm = &out->planes[plane_id];
5711 skl_wm_level_from_reg_val(val, &wm->wm[level]);
5719 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5730 struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
5731 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
5739 skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
5747 dev_priv->wm.distrust_bios_wm = true;
5755 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5757 struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
5782 active->wm[0].enable = true;
5783 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
5784 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
5785 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
5796 active->wm[level].enable = true;
5799 crtc->wm.active.ilk = *active;
5808 struct g4x_wm_values *wm)
5813 wm->sr.plane = _FW_WM(tmp, SR);
5814 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5815 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
5816 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
5819 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
5820 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
5821 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
5822 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
5823 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5824 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
5827 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
5828 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5829 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
5830 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
5834 struct vlv_wm_values *wm)
5842 wm->ddl[pipe].plane[PLANE_PRIMARY] =
5844 wm->ddl[pipe].plane[PLANE_CURSOR] =
5846 wm->ddl[pipe].plane[PLANE_SPRITE0] =
5848 wm->ddl[pipe].plane[PLANE_SPRITE1] =
5853 wm->sr.plane = _FW_WM(tmp, SR);
5854 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5855 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
5856 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
5859 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
5860 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5861 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
5864 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5868 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5869 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5872 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
5873 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
5876 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
5877 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
5880 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5881 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
5882 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
5883 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
5884 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5885 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5886 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5887 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5888 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5889 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5892 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5893 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5896 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5897 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5898 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5899 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5900 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5901 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5902 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
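
The readout above mirrors the write path: VLV/CHV plane watermarks are wider than the 8-bit fields in the DSPFW registers, so the low 8 bits live there and the extra high bit is carried in DSPHOWM (the SR value keeps 9 bits in place and only overflows from bit 9 upwards). Recombining a value is a shift-and-OR; a worked example with an invented watermark of 300:

    unsigned int wm_val  = 300;                    /* 0x12c                 */
    unsigned int fw_lo   = wm_val & 0xff;          /* 0x2c -> DSPFWx field  */
    unsigned int fw_hi   = (wm_val >> 8) & 0x1;    /* 0x1  -> *_HI bit      */
    unsigned int readout = (fw_hi << 8) | fw_lo;   /* back to 300           */
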
5911 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
5914 g4x_read_wm_values(dev_priv, wm);
5916 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
5921 struct g4x_wm_state *active = &crtc->wm.active.g4x;
5927 active->cxsr = wm->cxsr;
5928 active->hpll_en = wm->hpll_en;
5929 active->fbc_en = wm->fbc_en;
5931 active->sr = wm->sr;
5932 active->hpll = wm->hpll;
5935 active->wm.plane[plane_id] =
5936 wm->pipe[pipe].plane[plane_id];
5939 if (wm->cxsr && wm->hpll_en)
5941 else if (wm->cxsr)
5947 raw = &crtc_state->wm.g4x.raw[level];
5949 raw->plane[plane_id] = active->wm.plane[plane_id];
5954 raw = &crtc_state->wm.g4x.raw[level];
5963 raw = &crtc_state->wm.g4x.raw[level];
5975 crtc_state->wm.g4x.optimal = *active;
5976 crtc_state->wm.g4x.intermediate = *active;
5981 wm->pipe[pipe].plane[PLANE_PRIMARY],
5982 wm->pipe[pipe].plane[PLANE_CURSOR],
5983 wm->pipe[pipe].plane[PLANE_SPRITE0]);
5988 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
5991 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
5993 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
6001 mutex_lock(&dev_priv->wm.wm_mutex);
6010 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
6019 &crtc_state->wm.g4x.raw[level];
6022 wm_state->wm.plane[plane_id] = 0;
6028 &crtc_state->wm.g4x.raw[level];
6042 crtc_state->wm.g4x.intermediate =
6043 crtc_state->wm.g4x.optimal;
6044 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
6049 mutex_unlock(&dev_priv->wm.wm_mutex);
6054 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
6058 vlv_read_wm_values(dev_priv, wm);
6060 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
6061 wm->level = VLV_WM_LEVEL_PM2;
6068 wm->level = VLV_WM_LEVEL_PM5;
6088 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
6092 wm->level = VLV_WM_LEVEL_DDR_DVFS;
6101 struct vlv_wm_state *active = &crtc->wm.active.vlv;
6103 &crtc_state->wm.vlv.fifo_state;
6110 active->num_levels = wm->level + 1;
6111 active->cxsr = wm->cxsr;
6115 &crtc_state->wm.vlv.raw[level];
6117 active->sr[level].plane = wm->sr.plane;
6118 active->sr[level].cursor = wm->sr.cursor;
6121 active->wm[level].plane[plane_id] =
6122 wm->pipe[pipe].plane[plane_id];
6125 vlv_invert_wm_value(active->wm[level].plane[plane_id],
6135 crtc_state->wm.vlv.optimal = *active;
6136 crtc_state->wm.vlv.intermediate = *active;
6141 wm->pipe[pipe].plane[PLANE_PRIMARY],
6142 wm->pipe[pipe].plane[PLANE_CURSOR],
6143 wm->pipe[pipe].plane[PLANE_SPRITE0],
6144 wm->pipe[pipe].plane[PLANE_SPRITE1]);
6149 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
6157 mutex_lock(&dev_priv->wm.wm_mutex);
6166 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
6168 &crtc_state->wm.vlv.fifo_state;
6177 &crtc_state->wm.vlv.raw[level];
6181 wm_state->wm[level].plane[plane_id] =
6191 crtc_state->wm.vlv.intermediate =
6192 crtc_state->wm.vlv.optimal;
6193 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
6198 mutex_unlock(&dev_priv->wm.wm_mutex);
6219 struct ilk_wm_values *hw = &dev_priv->wm.hw;
6250 * @crtc: the #intel_crtc on which to compute the WM
6252 * Calculate watermark values for the various WM regs based on current mode
6491 * Note that PS/WM thread counts depend on the WIZ hashing
6846 * Note that PS/WM thread counts depend on the WIZ hashing
6940 * Note that PS/WM thread counts depend on the WIZ hashing
7017 * Note that PS/WM thread counts depend on the WIZ hashing
7266 if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
7267 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7268 (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
7269 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {