/*	$NetBSD: i915_sysfs.c,v 1.3 2021/12/18 23:45:28 riastradh Exp $	*/

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_sysfs.c,v 1.3 2021/12/18 23:45:28 riastradh Exp $");

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"

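/*
 * All of these attributes hang off the DRM minor's device node (the
 * "card%d" device, typically /sys/class/drm/card0), so map from that
 * struct device back to our drm_i915_private via the drm_minor stored
 * as driver data.
 */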
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
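/*
 * Read an RC6 residency counter under a runtime-PM wakeref.  The counter
 * is reported by intel_rc6_residency_us() in microseconds; the sysfs
 * files advertise milliseconds, hence the divide-and-round below.
 */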
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u64 res = 0;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);

	return DIV_ROUND_CLOSEST_ULL(res, 1000);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	unsigned int mask;

	mask = 0;
	if (HAS_RC6(dev_priv))
		mask |= BIT(0);
	if (HAS_RC6p(dev_priv))
		mask |= BIT(1);
	if (HAS_RC6pp(dev_priv))
		mask |= BIT(2);

	return snprintf(buf, PAGE_SIZE, "%x\n", mask);
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

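/*
 * The three groups below all reuse power_group_name, so sysfs_merge_group()
 * folds their files into the device's existing "power/" directory rather
 * than creating a new subdirectory, e.g. (path is illustrative and may
 * differ per system):
 *
 *   $ cat /sys/class/drm/card0/power/rc6_residency_ms
 */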
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs =  rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs =  rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs =  media_rc6_attrs
};
#endif

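/*
 * The l3_parity files accept only u32-aligned accesses within the
 * GEN7_L3LOG_SIZE window, and only on hardware with L3 dynamic parity
 * fault handling (DPF).
 */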
static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
	if (!HAS_L3_DPF(i915))
		return -EPERM;

	if (!IS_ALIGNED(offset, sizeof(u32)))
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

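/*
 * Return the cached remap table for the slice selected via attr->private.
 * If no remapping has been written yet, the buffer reads back as zeroes.
 */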
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	count = round_down(count, sizeof(u32));
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
	memset(buf, 0, count);

	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
		       count);
	spin_unlock(&i915->gem.contexts.lock);

	return count;
}

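/*
 * Store user-supplied L3 row remapping for one slice.  The table is
 * allocated lazily: we allocate outside the lock and, if another writer
 * beat us to it, reuse the existing table and free our copy ("freeme").
 * Every context is then flagged so the new mapping is programmed on its
 * next context switch.
 */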
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info, *freeme = NULL;
	struct i915_gem_context *ctx;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	if (count < sizeof(u32))
		return -EINVAL;

	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);

	if (i915->l3_parity.remap_info[slice]) {
		freeme = remap_info;
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}

	count = round_down(count, sizeof(u32));
	memcpy(remap_info + offset / sizeof(u32), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		ctx->remap_slice |= BIT(slice);

	spin_unlock(&i915->gem.contexts.lock);
	kfree(freeme);

	/*
	 * TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */

	return count;
}

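/*
 * One binary attribute per L3 slice; the slice index is carried in
 * ->private and recovered by the read/write handlers above.
 */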
static const struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static const struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

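/*
 * gt_act_freq_mhz reports the frequency the GT is actually running at
 * (read back from hardware), whereas gt_cur_freq_mhz reports the last
 * frequency requested by the RPS code; the two can differ, e.g. while
 * the GT is idle.
 */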
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_rps_read_actual_frequency(rps));
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->boost_freq));
}

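/*
 * Update the boost frequency.  The value is converted from MHz to an RPS
 * opcode and validated against the static hardware range; if waiters are
 * currently blocked on the GPU, kick the RPS worker so the new boost
 * frequency takes effect straight away.
 */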
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	bool boost = false;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return count;
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(rps, rps->min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static DEVICE_ATTR_RO(gt_act_freq_mhz);
static DEVICE_ATTR_RO(gt_cur_freq_mhz);
static DEVICE_ATTR_RW(gt_boost_freq_mhz);
static DEVICE_ATTR_RW(gt_max_freq_mhz);
static DEVICE_ATTR_RW(gt_min_freq_mhz);

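/*
 * The DEVICE_ATTR_RO/RW macros above pair each sysfs file with the matching
 * <name>_show/<name>_store handler defined earlier.  Typical usage from
 * user space (the card path is illustrative and may differ):
 *
 *   $ cat /sys/class/drm/card0/gt_cur_freq_mhz
 *   $ echo 1100 > /sys/class/drm/card0/gt_max_freq_mhz
 */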
static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(rps, rps->min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute * const gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute * const vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

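/*
 * The "error" file exposes the most recently captured GPU error state
 * (coredump).  Reads stream out the coredump; any write simply discards
 * the captured state so that a new hang can be recorded.
 */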
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct i915_gpu_coredump *gpu;
	ssize_t ret;

	gpu = i915_first_error_state(i915);
	if (IS_ERR(gpu)) {
		ret = PTR_ERR(gpu);
	} else if (gpu) {
		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
		i915_gpu_coredump_put(gpu);
	} else {
		const char *str = "No error state collected\n";
		size_t len = strlen(str);

		ret = min_t(size_t, count, len - off);
		memcpy(buf, str + off, ret);
	}

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif

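/*
 * Populate the primary minor's device node with whichever attribute sets
 * the platform supports (RC6 residency, L3 parity, RPS frequency controls,
 * error capture).  i915_teardown_sysfs() below undoes this when the device
 * is unregistered.
 */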
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);
}

void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev,  &dpf_attrs_1);
	device_remove_bin_file(kdev,  &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}