/*	$NetBSD: intel_device_info.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $	*/

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_device_info.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $");

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

static const char *iommu_name(void)
{
	const char *msg = "n/a";

#ifdef CONFIG_INTEL_IOMMU
	msg = enableddisabled(intel_iommu_gfx_mapped);
#endif

	return msg;
}

void intel_device_info_print_static(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	drm_printf(p, "engines: %x\n", info->engine_mask);
	drm_printf(p, "gen: %d\n", info->gen);
	drm_printf(p, "gt: %d\n", info->gt);
	drm_printf(p, "iommu: %s\n", iommu_name());
	drm_printf(p, "memory-regions: %x\n", info->memory_regions);
	drm_printf(p, "page-sizes: %x\n", info->page_sizes);
	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_print_runtime(const struct intel_runtime_info *info,
				     struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}
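
/*
 * Worked example (values are illustrative, not from any PRM): with
 * max_subslices = 4 and eu_stride = 2 bytes, the EU mask for slice 1,
 * subslice 2 lives at byte offset 1 * (4 * 2) + 2 * 2 = 12 of
 * sseu->eu_mask[].
 */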

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}
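
/*
 * The two helpers above are inverses of each other. For instance
 * (illustrative values): with eu_stride = 2, sseu_set_eus(sseu, s, ss,
 * 0x01ff) stores the bytes 0xff and 0x01 starting at the offset computed
 * by sseu_eu_idx(), and sseu_get_eus(sseu, s, ss) reassembles them into
 * 0x01ff.
 */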

void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
				      struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}
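
/*
 * For a hypothetical part with one slice, two subslices and all 8 EUs
 * per subslice present, the topology dump above would read:
 *
 *	slice0: 2 subslice(s) (0x00000003):
 *		subslice0: 8 EUs (0xff)
 *		subslice1: 8 EUs (0xff)
 */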

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
				    u8 s_en, u32 ss_en, u16 eu_en)
{
	int s, ss;

	/* ss_en represents the entire subslice mask across all slices */
	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
		   sizeof(ss_en) * BITS_PER_BYTE);

	for (s = 0; s < sseu->max_slices; s++) {
		if ((s_en & BIT(s)) == 0)
			continue;

		sseu->slice_mask |= BIT(s);

		intel_sseu_set_subslices(sseu, s, ss_en);

		for (ss = 0; ss < sseu->max_subslices; ss++)
			if (intel_sseu_has_subslice(sseu, s, ss))
				sseu_set_eus(sseu, s, ss, eu_en);
	}
	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}
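
/*
 * Worked example with made-up fuse values: on a 1x8x8 configuration,
 * s_en = 0x1, ss_en = 0xff and eu_en = 0xff yield slice_mask = 0x1,
 * eight enabled subslices, eu_per_subslice = 8 and eu_total = 64.
 */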

static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 dss_en;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	int eu;

	/*
	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
	 * Instead of splitting these, provide userspace with an array
	 * of DSS to more closely represent the hardware resource.
	 */
	intel_sseu_set_info(sseu, 1, 6, 16);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;

	dss_en = I915_READ(GEN12_GT_DSS_ENABLE);

	/* one bit per pair of EUs */
	eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);

	/* TGL only supports slice-level power gating */
	sseu->has_slice_pg = 1;
}
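
/*
 * The EU pair expansion above is easiest to see with a made-up fuse
 * value: eu_en_fuse = 0x0f enables the first four EU pairs, which
 * expands to eu_en = 0xff, i.e. EUs 0-7.
 */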

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en;
	u8 eu_en;

	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		u32 subslice_mask_with_eus = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask_with_eus &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * each of the other slices.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ?
						  subslice_mask_with_eus :
						  subslice_mask_with_eus & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We can tune
			 * the hash used to spread work among subslices if
			 * they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
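
/*
 * To illustrate the pooled-EU rules above with hypothetical fuses: a
 * GEN9_LP part with all three subslices enabled gets has_pooled_eu set
 * and min_eu_in_pool = 9, whereas a part with subslice 1 fused off is
 * left with two subslices, so has_pooled_eu stays false.
 */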

static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices
	 * there are; we work it out from the PCI IDs here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
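
/*
 * Illustrative example: a GT2 part with the 10-EU fuse setting ends up
 * with slice_mask = 0x1, two subslices of 10 EUs each and thus
 * eu_total = 20.
 */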

static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}
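
/*
 * Worked example with made-up override values: a divider field of 18
 * gives base_freq = (18 + 1) * 1000 = 19000 kHz, and a denominator
 * field of 4 gives frac_freq = 1000 / (4 + 1) = 200 kHz, i.e. a
 * 19.2 MHz reference.
 */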

static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the Clocking Configuration
		 *      (CLKCFG) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours)."
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}
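
/*
 * Example of the shift fix-up in the function above, with assumed
 * register values: a 19200 kHz crystal and a shift parameter of 0 give
 * freq = 19200 >> (3 - 0) = 2400 kHz for the command stream timestamp.
 */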

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
	INTEL_CML_U_GT1_IDS(0),
	INTEL_CML_U_GT2_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
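
/*
 * After this runs, the IS_<platform>() and subplatform checks reduce to
 * bit tests against platform_mask[pi]: e.g. on a (hypothetical) Haswell
 * ULT machine, both BIT(pb) for HASWELL and BIT(INTEL_SUBPLATFORM_ULT)
 * end up set in the mask.
 */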

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up, as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
	    HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->pipe_mask = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->pipe_mask &= ~BIT(PIPE_C);
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 enabled_mask = info->pipe_mask;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			enabled_mask &= ~BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			enabled_mask &= ~BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			enabled_mask &= ~BIT(PIPE_C);
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE))
			enabled_mask &= ~BIT(PIPE_D);

		/*
		 * At least one pipe should be enabled and if there are
		 * disabled pipes, they should be the last ones, with no holes
		 * in the mask.
		 */
		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
				  enabled_mask);
		else
			info->pipe_mask = enabled_mask;

		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
			info->display.has_hdcp = 0;

		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
			info->display.has_fbc = 0;

		if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
			info->display.has_csr = 0;

		if (INTEL_GEN(dev_priv) >= 10 &&
		    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
			info->display.has_dsc = 0;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		hsw_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		bdw_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 11))
		gen11_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 12)
		gen12_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
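		/*
		 * E.g., with a hypothetical vdbox_mask of 0x5 on gen11, vcs0
		 * is logical vdbox 0 and is granted SFC access below, while
		 * vcs2 is logical vdbox 1 and is not.
		 */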
		if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
   1123