Home | History | Annotate | Line # | Download | only in selftests
      1 /*	$NetBSD: intel_uncore.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright  2016 Intel Corporation
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23  * IN THE SOFTWARE.
     24  *
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: intel_uncore.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
     29 
     30 #include "../i915_selftest.h"
     31 
     32 static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
     33 				unsigned int num_ranges,
     34 				bool is_watertight)
     35 {
     36 	unsigned int i;
     37 	s32 prev;
     38 
     39 	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
     40 		/* Check that the table is watertight */
     41 		if (is_watertight && (prev + 1) != (s32)ranges->start) {
     42 			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
     43 			       __func__, i, ranges->start, ranges->end, prev);
     44 			return -EINVAL;
     45 		}
     46 
     47 		/* Check that the table never goes backwards */
     48 		if (prev >= (s32)ranges->start) {
     49 			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
     50 			       __func__, i, ranges->start, ranges->end, prev);
     51 			return -EINVAL;
     52 		}
     53 
     54 		/* Check that the entry is valid */
     55 		if (ranges->start >= ranges->end) {
     56 			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
     57 			       __func__, i, ranges->start, ranges->end);
     58 			return -EINVAL;
     59 		}
     60 
     61 		prev = ranges->end;
     62 	}
     63 
     64 	return 0;
     65 }
     66 
     67 static int intel_shadow_table_check(void)
     68 {
     69 	struct {
     70 		const i915_reg_t *regs;
     71 		unsigned int size;
     72 	} reg_lists[] = {
     73 		{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
     74 		{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
     75 		{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
     76 	};
     77 	const i915_reg_t *reg;
     78 	unsigned int i, j;
     79 	s32 prev;
     80 
     81 	for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
     82 		reg = reg_lists[j].regs;
     83 		for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
     84 			u32 offset = i915_mmio_reg_offset(*reg);
     85 
     86 			if (prev >= (s32)offset) {
     87 				pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
     88 				       __func__, i, offset, prev);
     89 				return -EINVAL;
     90 			}
     91 
     92 			prev = offset;
     93 		}
     94 	}
     95 
     96 	return 0;
     97 }
     98 
     99 int intel_uncore_mock_selftests(void)
    100 {
    101 	struct {
    102 		const struct intel_forcewake_range *ranges;
    103 		unsigned int num_ranges;
    104 		bool is_watertight;
    105 	} fw[] = {
    106 		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
    107 		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
    108 		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
    109 		{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
    110 		{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
    111 	};
    112 	int err, i;
    113 
    114 	for (i = 0; i < ARRAY_SIZE(fw); i++) {
    115 		err = intel_fw_table_check(fw[i].ranges,
    116 					   fw[i].num_ranges,
    117 					   fw[i].is_watertight);
    118 		if (err)
    119 			return err;
    120 	}
    121 
    122 	err = intel_shadow_table_check();
    123 	if (err)
    124 		return err;
    125 
    126 	return 0;
    127 }
    128 
    129 static int live_forcewake_ops(void *arg)
    130 {
    131 	static const struct reg {
    132 		const char *name;
    133 		unsigned long platforms;
    134 		unsigned int offset;
    135 	} registers[] = {
    136 		{
    137 			"RING_START",
    138 			INTEL_GEN_MASK(6, 7),
    139 			0x38,
    140 		},
    141 		{
    142 			"RING_MI_MODE",
    143 			INTEL_GEN_MASK(8, BITS_PER_LONG),
    144 			0x9c,
    145 		}
    146 	};
    147 	const struct reg *r;
    148 	struct intel_gt *gt = arg;
    149 	struct intel_uncore_forcewake_domain *domain;
    150 	struct intel_uncore *uncore = gt->uncore;
    151 	struct intel_engine_cs *engine;
    152 	enum intel_engine_id id;
    153 	intel_wakeref_t wakeref;
    154 	unsigned int tmp;
    155 	int err = 0;
    156 
    157 	GEM_BUG_ON(gt->awake);
    158 
    159 	/* vlv/chv with their pcu behave differently wrt reads */
    160 	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
    161 		pr_debug("PCU fakes forcewake badly; skipping\n");
    162 		return 0;
    163 	}
    164 
    165 	/*
    166 	 * Not quite as reliable across the gen as one would hope.
    167 	 *
    168 	 * Either our theory of operation is incorrect, or there remain
    169 	 * external parties interfering with the powerwells.
    170 	 *
    171 	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
    172 	 */
    173 	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
    174 		return 0;
    175 
    176 	/* We have to pick carefully to get the exact behaviour we need */
    177 	for (r = registers; r->name; r++)
    178 		if (r->platforms & INTEL_INFO(gt->i915)->gen_mask)
    179 			break;
    180 	if (!r->name) {
    181 		pr_debug("Forcewaked register not known for %s; skipping\n",
    182 			 intel_platform_name(INTEL_INFO(gt->i915)->platform));
    183 		return 0;
    184 	}
    185 
    186 	wakeref = intel_runtime_pm_get(uncore->rpm);
    187 
    188 	for_each_fw_domain(domain, uncore, tmp) {
    189 		smp_store_mb(domain->active, false);
    190 		if (!hrtimer_cancel(&domain->timer))
    191 			continue;
    192 
    193 		intel_uncore_fw_release_timer(&domain->timer);
    194 	}
    195 
    196 	for_each_engine(engine, gt, id) {
    197 		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
    198 		u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
    199 		enum forcewake_domains fw_domains;
    200 		u32 val;
    201 
    202 		if (!engine->default_state)
    203 			continue;
    204 
    205 		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
    206 							    FW_REG_READ);
    207 		if (!fw_domains)
    208 			continue;
    209 
    210 		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
    211 			if (!domain->wake_count)
    212 				continue;
    213 
    214 			pr_err("fw_domain %s still active, aborting test!\n",
    215 			       intel_uncore_forcewake_domain_to_str(domain->id));
    216 			err = -EINVAL;
    217 			goto out_rpm;
    218 		}
    219 
    220 		intel_uncore_forcewake_get(uncore, fw_domains);
    221 		val = readl(reg);
    222 		intel_uncore_forcewake_put(uncore, fw_domains);
    223 
    224 		/* Flush the forcewake release (delayed onto a timer) */
    225 		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
    226 			smp_store_mb(domain->active, false);
    227 			if (hrtimer_cancel(&domain->timer))
    228 				intel_uncore_fw_release_timer(&domain->timer);
    229 
    230 			preempt_disable();
    231 			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
    232 			preempt_enable();
    233 			if (err) {
    234 				pr_err("Failed to clear fw_domain %s\n",
    235 				       intel_uncore_forcewake_domain_to_str(domain->id));
    236 				goto out_rpm;
    237 			}
    238 		}
    239 
    240 		if (!val) {
    241 			pr_err("%s:%s was zero while fw was held!\n",
    242 			       engine->name, r->name);
    243 			err = -EINVAL;
    244 			goto out_rpm;
    245 		}
    246 
    247 		/* We then expect the read to return 0 outside of the fw */
    248 		if (wait_for(readl(reg) == 0, 100)) {
    249 			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
    250 			       engine->name, r->name, readl(reg), fw_domains);
    251 			err = -ETIMEDOUT;
    252 			goto out_rpm;
    253 		}
    254 	}
    255 
    256 out_rpm:
    257 	intel_runtime_pm_put(uncore->rpm, wakeref);
    258 	return err;
    259 }
    260 
/*
 * live_forcewake_domains - probe which mmio offsets in [0, FW_RANGE)
 * can be read without tripping the unclaimed-access detector while
 * FORCEWAKE_ALL is held, then re-read each such offset with all
 * forcewake dropped to check the per-domain coverage tables.
 *
 * NOTE(review): the exact ordering of posting-read / unclaimed-check /
 * forcewake-reset calls is load-bearing here — do not reorder.
 *
 * Returns 0 on success or skip, -ENOMEM on allocation failure,
 * -EINVAL if any offset reads unclaimed without forcewake.
 */
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
	struct intel_gt *gt = arg;
	struct intel_uncore *uncore = gt->uncore;
	unsigned long *valid;	/* bitmap: one bit per dword offset probed */
	u32 offset;
	int err;

	/* Only useful where unclaimed mmio accesses are detectable */
	if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
	    !IS_VALLEYVIEW(gt->i915) &&
	    !IS_CHERRYVIEW(gt->i915))
		return 0;

	/*
	 * This test may lockup the machine or cause GPU hangs afterwards.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Clear any stale unclaimed state before the probe loop */
	check_for_unclaimed_mmio(uncore);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		/* Mark the offset valid if reading it raised no complaint */
		intel_uncore_posting_read_fw(uncore, reg);
		if (!check_for_unclaimed_mmio(uncore))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		/* Force all forcewake domains to idle before each re-read */
		iosf_mbi_punit_acquire();
		intel_uncore_forcewake_reset(uncore);
		iosf_mbi_punit_release();

		/* Clear unclaimed state again so the next check is precise */
		check_for_unclaimed_mmio(uncore);

		/* Without forcewake, a valid offset must still read cleanly */
		intel_uncore_posting_read_fw(uncore, reg);
		if (check_for_unclaimed_mmio(uncore)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	bitmap_free(valid);
	return err;
}
    319 
    320 static int live_fw_table(void *arg)
    321 {
    322 	struct intel_gt *gt = arg;
    323 
    324 	/* Confirm the table we load is still valid */
    325 	return intel_fw_table_check(gt->uncore->fw_domains_table,
    326 				    gt->uncore->fw_domains_table_entries,
    327 				    INTEL_GEN(gt->i915) >= 9);
    328 }
    329 
    330 int intel_uncore_live_selftests(struct drm_i915_private *i915)
    331 {
    332 	static const struct i915_subtest tests[] = {
    333 		SUBTEST(live_fw_table),
    334 		SUBTEST(live_forcewake_ops),
    335 		SUBTEST(live_forcewake_domains),
    336 	};
    337 
    338 	return intel_gt_live_subtests(tests, &i915->gt);
    339 }
    340