/* i915_sysfs.c, revision 1.1.1.1.6.1 */
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

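/*
 * The sysfs files below hang off the DRM card device; its drvdata points
 * back at the drm_minor, which is how each show/store callback finds the
 * drm_device it belongs to.
 */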
#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
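/*
 * Convert a raw RC6 residency counter to milliseconds.  On most gens the
 * counter ticks every 1.28us, so ms = ticks * 128 / 100000 (the "bias"
 * factor of 100 only matters on the VLV path, where it keeps the integer
 * math precise).  On VLV the counter instead ticks with the CZ clock:
 * VLV_CLK_CTL2 reports how many CZ clocks fit in 30ns, from which the
 * per-tick period is derived below.
 */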
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clkctl2;

		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
			CLK_CTL2_CZCOUNT_30NS_SHIFT;
		if (WARN(!clkctl2, "bogus CZ count value")) {
			ret = 0;
			goto out;
		}
		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = 1000000ULL * bias;
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

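/*
 * The rc6*_residency_ms files report how long the GPU has spent in each
 * RC6 power state, in milliseconds; rc6_enable reports the RC6 mask
 * currently in effect.
 */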
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

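/* VLV has no RC6p/RC6pp states, so their residency is reported as zero. */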
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif

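/*
 * l3_parity: userspace interface to the L3 remapping table.  Reads return
 * the current per-slice remap information; writes update it.  Offsets are
 * in bytes but must stay aligned to the u32 entries of the table.
 */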
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

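/*
 * Writes stash the new remap values and then flag every context so that the
 * table is re-emitted on the next context switch; see the remap_slice
 * handling below.  The GPU is idled first so an in-flight batch cannot
 * observe a half-updated table.
 */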
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct i915_hw_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	/* Like the read path, only whole u32 entries can be updated */
	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the
	 * GPU at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

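/* .private carries the slice index, recovered in i915_l3_read/write above */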
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

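/*
 * RPS (render P-state) frequency controls.  All values are exposed in MHz;
 * internally they are stored as hardware ratio codes, so each path converts
 * with either vlv_gpu_freq()/vlv_freq_opcode() or GT_FREQUENCY_MULTIPLIER
 * depending on the platform.
 */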
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

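/*
 * New limits are validated against the hardware min/max and against the
 * opposing soft limit before being applied; the current frequency is then
 * re-clamped into the new window.
 */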
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	dev_priv->rps.max_freq_softlimit = val;

	if (dev_priv->rps.cur_freq > val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new
		 * max_freq_softlimit and update the interrupt limits even
		 * though the frequency request is unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (dev_priv->rps.cur_freq < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new
		 * min_freq_softlimit and update the interrupt limits even
		 * though the frequency request is unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
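/*
 * GEN6_RP_STATE_CAP packs the three capability frequencies into one
 * register: RP0 (highest) in bits 7:0, RP1 in bits 15:8 and RPn (lowest)
 * in bits 23:16, each a ratio code that is scaled to MHz below.
 */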
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

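/*
 * error_state: reading serializes the most recently captured GPU error
 * state into text; writing anything to the file clears the saved state.
 */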
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

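/*
 * Registration happens at driver load; a failure to create any individual
 * file is logged but deliberately not treated as fatal.
 */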
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}