Home | History | Annotate | Line # | Download | only in i915
      1  1.48  riastrad /*	$NetBSD: i915_drv.c,v 1.48 2022/09/22 14:37:38 riastradh Exp $	*/
      2   1.7  riastrad 
      3   1.1  riastrad /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
      4   1.1  riastrad  */
      5   1.1  riastrad /*
      6   1.1  riastrad  *
      7   1.1  riastrad  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      8   1.1  riastrad  * All Rights Reserved.
      9   1.1  riastrad  *
     10   1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
     11   1.1  riastrad  * copy of this software and associated documentation files (the
     12   1.1  riastrad  * "Software"), to deal in the Software without restriction, including
     13   1.1  riastrad  * without limitation the rights to use, copy, modify, merge, publish,
     14   1.1  riastrad  * distribute, sub license, and/or sell copies of the Software, and to
     15   1.1  riastrad  * permit persons to whom the Software is furnished to do so, subject to
     16   1.1  riastrad  * the following conditions:
     17   1.1  riastrad  *
     18   1.1  riastrad  * The above copyright notice and this permission notice (including the
     19   1.1  riastrad  * next paragraph) shall be included in all copies or substantial portions
     20   1.1  riastrad  * of the Software.
     21   1.1  riastrad  *
     22   1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     23   1.1  riastrad  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     24   1.1  riastrad  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     25   1.1  riastrad  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     26   1.1  riastrad  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     27   1.1  riastrad  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     28   1.1  riastrad  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     29   1.1  riastrad  *
     30   1.1  riastrad  */
     31   1.1  riastrad 
     32   1.7  riastrad #include <sys/cdefs.h>
     33  1.48  riastrad __KERNEL_RCSID(0, "$NetBSD: i915_drv.c,v 1.48 2022/09/22 14:37:38 riastradh Exp $");
     34   1.7  riastrad 
     35  1.20  riastrad #include <linux/acpi.h>
     36   1.1  riastrad #include <linux/device.h>
     37  1.20  riastrad #include <linux/oom.h>
     38  1.20  riastrad #include <linux/module.h>
     39  1.20  riastrad #include <linux/pci.h>
     40  1.20  riastrad #include <linux/pm.h>
     41  1.20  riastrad #include <linux/pm_runtime.h>
     42  1.20  riastrad #include <linux/pnp.h>
     43  1.20  riastrad #include <linux/slab.h>
     44  1.20  riastrad #include <linux/vga_switcheroo.h>
     45  1.20  riastrad #include <linux/vt.h>
     46  1.20  riastrad #include <acpi/video.h>
     47  1.20  riastrad 
     48  1.20  riastrad #include <drm/drm_atomic_helper.h>
     49  1.20  riastrad #include <drm/drm_ioctl.h>
     50  1.20  riastrad #include <drm/drm_irq.h>
     51  1.33  riastrad #include <drm/drm_pci.h>
     52  1.20  riastrad #include <drm/drm_probe_helper.h>
     53   1.1  riastrad #include <drm/i915_drm.h>
     54  1.20  riastrad 
     55  1.20  riastrad #include "display/intel_acpi.h"
     56  1.20  riastrad #include "display/intel_audio.h"
     57  1.20  riastrad #include "display/intel_bw.h"
     58  1.20  riastrad #include "display/intel_cdclk.h"
     59  1.20  riastrad #include "display/intel_display_types.h"
     60  1.20  riastrad #include "display/intel_dp.h"
     61  1.20  riastrad #include "display/intel_fbdev.h"
     62  1.20  riastrad #include "display/intel_hotplug.h"
     63  1.20  riastrad #include "display/intel_overlay.h"
     64  1.20  riastrad #include "display/intel_pipe_crc.h"
     65  1.20  riastrad #include "display/intel_sprite.h"
     66  1.20  riastrad #include "display/intel_vga.h"
     67  1.20  riastrad 
     68  1.20  riastrad #include "gem/i915_gem_context.h"
     69  1.20  riastrad #include "gem/i915_gem_ioctls.h"
     70  1.20  riastrad #include "gem/i915_gem_mman.h"
     71  1.20  riastrad #include "gt/intel_gt.h"
     72  1.20  riastrad #include "gt/intel_gt_pm.h"
     73  1.20  riastrad #include "gt/intel_rc6.h"
     74  1.20  riastrad 
     75  1.20  riastrad #include "i915_debugfs.h"
     76   1.1  riastrad #include "i915_drv.h"
     77  1.20  riastrad #include "i915_irq.h"
     78  1.20  riastrad #include "i915_memcpy.h"
     79  1.20  riastrad #include "i915_perf.h"
     80  1.20  riastrad #include "i915_query.h"
     81  1.20  riastrad #include "i915_suspend.h"
     82  1.20  riastrad #include "i915_switcheroo.h"
     83  1.20  riastrad #include "i915_sysfs.h"
     84   1.1  riastrad #include "i915_trace.h"
     85  1.20  riastrad #include "i915_vgpu.h"
     86  1.20  riastrad #include "intel_csr.h"
     87  1.20  riastrad #include "intel_memory_region.h"
     88  1.20  riastrad #include "intel_pm.h"
     89   1.1  riastrad 
     90  1.26  riastrad #ifdef __NetBSD__
     91  1.26  riastrad #ifdef notyet
     92  1.26  riastrad #if defined(__i386__)
     93  1.26  riastrad #include "pnpbios.h"
     94  1.26  riastrad #endif
     95  1.26  riastrad #if NPNPBIOS > 0
     96  1.26  riastrad #define CONFIG_PNP
     97  1.26  riastrad #endif
     98  1.26  riastrad #endif
     99  1.26  riastrad #endif
    100  1.26  riastrad 
    101  1.29  riastrad #include <linux/nbsd-namespace.h>
    102  1.29  riastrad 
    103   1.1  riastrad static struct drm_driver driver;
    104   1.1  riastrad 
/*
 * Register state preserved for Valleyview.
 *
 * Allocated only on VLV (see vlv_alloc_s0ix_state); the allocation is
 * deliberately not zeroed because every field is written before it is
 * read.  Presumably this snapshot is saved around S0ix low-power entry
 * and restored on exit -- the save/restore code is outside this chunk;
 * verify there before relying on field semantics.
 */
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};
    166  1.20  riastrad 
    167  1.20  riastrad static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
    168  1.20  riastrad {
    169  1.20  riastrad 	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
    170  1.20  riastrad 
    171  1.20  riastrad 	dev_priv->bridge_dev =
    172  1.20  riastrad 		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
    173  1.20  riastrad 	if (!dev_priv->bridge_dev) {
    174  1.20  riastrad 		DRM_ERROR("bridge device not found\n");
    175  1.20  riastrad 		return -1;
    176  1.20  riastrad 	}
    177  1.20  riastrad 	return 0;
    178  1.20  riastrad }
    179  1.20  riastrad 
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	/* MCHBAR is a 64-bit BAR on gen4+, 32-bit before that. */
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
#ifdef CONFIG_PNP
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
#endif
	int ret;

#ifdef CONFIG_PNP
	/* Read the current MCHBAR address out of the bridge's config space. */
	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		/* Zero start so intel_teardown_mchbar() won't release it. */
		dev_priv->mch_res.start = 0;
		return ret;
	}

	/* Program the allocated address back into the bridge (high dword
	 * first on gen4+, then the low dword). */
	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
    226  1.20  riastrad 
/*
 * Setup MCHBAR if possible; records in dev_priv->mchbar_need_disable
 * whether intel_teardown_mchbar() should disable it again.
 * (The old comment claimed this "returns true" -- the function is void;
 * the flag is the actual hand-off.)
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	/* VLV/CHV have no MCHBAR to set up. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	/* On 915G/915GM the enable bit lives in DEVEN; elsewhere it is
	 * bit 0 of the MCHBAR register itself. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	/* We enabled it, so remember to disable it on teardown. */
	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
    266  1.20  riastrad 
    267  1.20  riastrad static void
    268  1.20  riastrad intel_teardown_mchbar(struct drm_i915_private *dev_priv)
    269  1.20  riastrad {
    270  1.20  riastrad 	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
    271  1.20  riastrad 
    272  1.20  riastrad 	if (dev_priv->mchbar_need_disable) {
    273  1.20  riastrad 		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
    274  1.20  riastrad 			u32 deven_val;
    275  1.20  riastrad 
    276  1.20  riastrad 			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
    277  1.20  riastrad 					      &deven_val);
    278  1.20  riastrad 			deven_val &= ~DEVEN_MCHBAR_EN;
    279  1.20  riastrad 			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
    280  1.20  riastrad 					       deven_val);
    281  1.20  riastrad 		} else {
    282  1.20  riastrad 			u32 mchbar_val;
    283  1.20  riastrad 
    284  1.20  riastrad 			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
    285  1.20  riastrad 					      &mchbar_val);
    286  1.20  riastrad 			mchbar_val &= ~1;
    287  1.20  riastrad 			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
    288  1.20  riastrad 					       mchbar_val);
    289  1.20  riastrad 		}
    290  1.20  riastrad 	}
    291  1.20  riastrad 
    292  1.20  riastrad 	if (dev_priv->mch_res.start)
    293  1.20  riastrad 		release_resource(&dev_priv->mch_res);
    294  1.20  riastrad }
    295  1.20  riastrad 
/*
 * Bring up all modeset state: vblank bookkeeping, VBT/BIOS data, VGA
 * registration, DSM/switcheroo hooks, power domains, CSR firmware,
 * interrupts, the modeset core, GEM, overlay, and finally fbdev +
 * hotplug.  On any failure the goto ladder at the bottom unwinds
 * exactly the steps completed so far, in reverse order -- keep the
 * labels matched with their setup calls when editing.
 */
static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
	int ret;

	/* Fault-injection point for probe testing. */
	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			goto out;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto out;

#ifdef __NetBSD__
	/* The NetBSD port passes the device; Linux uses a global. */
	intel_register_dsm_handler(i915);
#else
	intel_register_dsm_handler();
#endif

	ret = i915_switcheroo_register(i915);
	if (ret)
		goto cleanup_vga_client;

	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	ret = intel_irq_install(i915);
	if (ret)
		goto cleanup_csr;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(i915);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(i915);

	/* Without a (usable) display there is no fbdev/hotplug to set up. */
	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
		return 0;

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);

	intel_init_ipc(i915);

	return 0;

cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
cleanup_modeset:
	intel_modeset_driver_remove(i915);
cleanup_irq:
	intel_irq_uninstall(i915);
cleanup_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	i915_switcheroo_unregister(i915);
cleanup_vga_client:
	intel_vga_unregister(i915);
out:
	return ret;
}
    377   1.1  riastrad 
/*
 * Tear down the modeset state created by i915_driver_modeset_probe(),
 * roughly in the reverse order of setup.  The sequence mirrors the
 * probe-side error-unwind ladder; do not reorder without auditing it.
 */
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_bios_driver_remove(i915);

	i915_switcheroo_unregister(i915);

	intel_vga_unregister(i915);

	intel_csr_ucode_fini(i915);
}
    392   1.7  riastrad 
/*
 * Record which IOSF sideband port reaches each DPIO PHY on VLV/CHV.
 * Other platforms are left untouched.
 */
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}
    407   1.7  riastrad 
    408  1.20  riastrad static int i915_workqueues_init(struct drm_i915_private *dev_priv)
    409  1.20  riastrad {
    410  1.20  riastrad 	/*
    411  1.20  riastrad 	 * The i915 workqueue is primarily used for batched retirement of
    412  1.20  riastrad 	 * requests (and thus managing bo) once the task has been completed
    413  1.20  riastrad 	 * by the GPU. i915_retire_requests() is called directly when we
    414  1.20  riastrad 	 * need high-priority retirement, such as waiting for an explicit
    415  1.20  riastrad 	 * bo.
    416  1.20  riastrad 	 *
    417  1.20  riastrad 	 * It is also used for periodic low-priority events, such as
    418  1.20  riastrad 	 * idle-timers and recording error state.
    419  1.20  riastrad 	 *
    420  1.20  riastrad 	 * All tasks on the workqueue are expected to acquire the dev mutex
    421  1.20  riastrad 	 * so there is no point in running more than one instance of the
    422  1.20  riastrad 	 * workqueue at any time.  Use an ordered one.
    423  1.20  riastrad 	 */
    424  1.20  riastrad 	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
    425  1.20  riastrad 	if (dev_priv->wq == NULL)
    426  1.20  riastrad 		goto out_err;
    427  1.20  riastrad 
    428  1.20  riastrad 	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
    429  1.20  riastrad 	if (dev_priv->hotplug.dp_wq == NULL)
    430  1.20  riastrad 		goto out_free_wq;
    431   1.7  riastrad 
    432  1.20  riastrad 	return 0;
    433   1.7  riastrad 
    434  1.20  riastrad out_free_wq:
    435  1.20  riastrad 	destroy_workqueue(dev_priv->wq);
    436  1.20  riastrad out_err:
    437  1.20  riastrad 	DRM_ERROR("Failed to allocate workqueues.\n");
    438   1.7  riastrad 
    439  1.20  riastrad 	return -ENOMEM;
    440  1.20  riastrad }
    441   1.7  riastrad 
/*
 * Destroy the workqueues created by i915_workqueues_init(), in the
 * reverse order of creation.
 */
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}
    447   1.1  riastrad 
    448   1.3  riastrad /*
    449  1.20  riastrad  * We don't keep the workarounds for pre-production hardware, so we expect our
    450  1.20  riastrad  * driver to fail on these machines in one way or another. A little warning on
    451  1.20  riastrad  * dmesg may help both the user and the bug triagers.
    452  1.20  riastrad  *
    453  1.20  riastrad  * Our policy for removing pre-production workarounds is to keep the
    454  1.20  riastrad  * current gen workarounds as a guide to the bring-up of the next gen
    455  1.20  riastrad  * (workarounds have a habit of persisting!). Anything older than that
    456  1.20  riastrad  * should be removed along with the complications they introduce.
    457   1.3  riastrad  */
    458  1.20  riastrad static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
    459  1.20  riastrad {
    460  1.20  riastrad 	bool pre = false;
    461   1.1  riastrad 
    462  1.20  riastrad 	pre |= IS_HSW_EARLY_SDV(dev_priv);
    463  1.20  riastrad 	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
    464  1.20  riastrad 	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
    465  1.20  riastrad 	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
    466  1.20  riastrad 
    467  1.20  riastrad 	if (pre) {
    468  1.20  riastrad 		DRM_ERROR("This is a pre-production stepping. "
    469  1.20  riastrad 			  "It may not be fully functional.\n");
    470  1.20  riastrad 		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
    471  1.20  riastrad 	}
    472  1.20  riastrad }
    473   1.7  riastrad 
    474  1.20  riastrad static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
    475   1.7  riastrad {
    476  1.20  riastrad 	if (!IS_VALLEYVIEW(i915))
    477  1.20  riastrad 		return 0;
    478   1.7  riastrad 
    479  1.20  riastrad 	/* we write all the values in the struct, so no need to zero it out */
    480  1.20  riastrad 	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
    481  1.20  riastrad 				       GFP_KERNEL);
    482  1.20  riastrad 	if (!i915->vlv_s0ix_state)
    483  1.20  riastrad 		return -ENOMEM;
    484   1.7  riastrad 
    485  1.20  riastrad 	return 0;
    486   1.7  riastrad }
    487   1.1  riastrad 
    488  1.20  riastrad static void vlv_free_s0ix_state(struct drm_i915_private *i915)
    489   1.1  riastrad {
    490  1.20  riastrad 	if (!i915->vlv_s0ix_state)
    491   1.3  riastrad 		return;
    492   1.1  riastrad 
    493  1.20  riastrad 	kfree(i915->vlv_s0ix_state);
    494  1.20  riastrad 	i915->vlv_s0ix_state = NULL;
    495  1.20  riastrad }
    496   1.3  riastrad 
    497  1.20  riastrad static void sanitize_gpu(struct drm_i915_private *i915)
    498  1.20  riastrad {
    499  1.20  riastrad 	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
    500  1.20  riastrad 		__intel_gt_reset(&i915->gt, ALL_ENGINES);
    501   1.1  riastrad }
    502   1.1  riastrad 
/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 *
 * Returns 0 on success or a negative errno; on failure everything set
 * up so far is unwound via the goto ladder at the bottom (which mirrors
 * i915_driver_late_release()).
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* Fault-injection point for probe testing. */
	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	/*
	 * NOTE(review): this pm_qos request is not removed on the error
	 * paths below (only in i915_driver_late_release()) -- confirm
	 * whether a probe failure leaks it.
	 */
	pm_qos_add_request(&dev_priv->sb_qos,
			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_alloc_s0ix_state(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

	/* Unwind in reverse order of the setup above. */
err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	/* NetBSD requires explicit destruction of locks. */
	mutex_destroy(&dev_priv->hdcp_comp_mutex);
	mutex_destroy(&dev_priv->pps_mutex);
	mutex_destroy(&dev_priv->wm.wm_mutex);
	mutex_destroy(&dev_priv->av_mutex);
	mutex_destroy(&dev_priv->sb_lock);
	mutex_destroy(&dev_priv->backlight_lock);
	spin_lock_destroy(&dev_priv->gpu_error.lock);
	spin_lock_destroy(&dev_priv->irq_lock);
	intel_uncore_fini_early(&dev_priv->uncore, dev_priv);
	intel_uncore_mmio_debug_fini_early(&dev_priv->mmio_debug);
	return ret;
}
    591  1.20  riastrad 
/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 *
 * Teardown runs in the reverse order of setup; the lock-destroy tail
 * mirrors the error path in i915_driver_early_probe().
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_display_crc_fini(dev_priv);
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	pm_qos_remove_request(&dev_priv->sb_qos);
	/* NetBSD requires explicit destruction of locks. */
	mutex_destroy(&dev_priv->hdcp_comp_mutex);
	mutex_destroy(&dev_priv->pps_mutex);
	mutex_destroy(&dev_priv->wm.wm_mutex);
	mutex_destroy(&dev_priv->av_mutex);
	mutex_destroy(&dev_priv->sb_lock);
	mutex_destroy(&dev_priv->backlight_lock);
	spin_lock_destroy(&dev_priv->gpu_error.lock);
	spin_lock_destroy(&dev_priv->irq_lock);
	intel_uncore_fini_early(&dev_priv->uncore, dev_priv);
	intel_uncore_mmio_debug_fini_early(&dev_priv->mmio_debug);
}
    619  1.20  riastrad 
/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 *
 * Returns 0 on success or a negative errno; on failure the goto ladder
 * unwinds completed steps in reverse order.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Fault-injection point for probe testing. */
	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune_mmio_domains(&dev_priv->uncore);

	intel_uc_init_mmio(&dev_priv->gt.uc);

	ret = intel_engines_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}
    669  1.20  riastrad 
    670  1.20  riastrad /**
    671  1.20  riastrad  * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
    672  1.20  riastrad  * @dev_priv: device private
    673  1.20  riastrad  */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	/* Unwind i915_driver_mmio_probe() in reverse order. */
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	/* Drop the bridge reference taken by i915_get_bridge_dev(). */
	pci_dev_put(dev_priv->bridge_dev);
}
    680  1.20  riastrad 
/* Validate/clamp module options; currently only the GVT options need it. */
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}
    685  1.20  riastrad 
    686  1.20  riastrad #define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
    687  1.20  riastrad 
    688  1.20  riastrad static const char *intel_dram_type_str(enum intel_dram_type type)
    689  1.20  riastrad {
    690  1.20  riastrad 	static const char * const str[] = {
    691  1.20  riastrad 		DRAM_TYPE_STR(UNKNOWN),
    692  1.20  riastrad 		DRAM_TYPE_STR(DDR3),
    693  1.20  riastrad 		DRAM_TYPE_STR(DDR4),
    694  1.20  riastrad 		DRAM_TYPE_STR(LPDDR3),
    695  1.20  riastrad 		DRAM_TYPE_STR(LPDDR4),
    696  1.20  riastrad 	};
    697  1.20  riastrad 
    698  1.20  riastrad 	if (type >= ARRAY_SIZE(str))
    699  1.20  riastrad 		type = INTEL_DRAM_UNKNOWN;
    700  1.20  riastrad 
    701  1.20  riastrad 	return str[type];
    702  1.20  riastrad }
    703  1.20  riastrad 
    704  1.20  riastrad #undef DRAM_TYPE_STR
    705  1.20  riastrad 
    706  1.20  riastrad static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
    707  1.20  riastrad {
    708  1.20  riastrad 	return dimm->ranks * 64 / (dimm->width ?: 1);
    709  1.20  riastrad }
    710  1.20  riastrad 
/* Returns total GB for the whole DIMM; 0 means the slot is not populated. */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}
    716  1.20  riastrad 
    717  1.20  riastrad static int skl_get_dimm_width(u16 val)
    718  1.20  riastrad {
    719  1.20  riastrad 	if (skl_get_dimm_size(val) == 0)
    720  1.20  riastrad 		return 0;
    721  1.20  riastrad 
    722  1.20  riastrad 	switch (val & SKL_DRAM_WIDTH_MASK) {
    723  1.20  riastrad 	case SKL_DRAM_WIDTH_X8:
    724  1.20  riastrad 	case SKL_DRAM_WIDTH_X16:
    725  1.20  riastrad 	case SKL_DRAM_WIDTH_X32:
    726  1.20  riastrad 		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
    727  1.20  riastrad 		return 8 << val;
    728  1.20  riastrad 	default:
    729  1.20  riastrad 		MISSING_CASE(val);
    730  1.20  riastrad 		return 0;
    731  1.20  riastrad 	}
    732  1.20  riastrad }
    733  1.20  riastrad 
    734  1.20  riastrad static int skl_get_dimm_ranks(u16 val)
    735  1.20  riastrad {
    736  1.20  riastrad 	if (skl_get_dimm_size(val) == 0)
    737  1.20  riastrad 		return 0;
    738  1.20  riastrad 
    739  1.20  riastrad 	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
    740  1.20  riastrad 
    741  1.20  riastrad 	return val + 1;
    742  1.20  riastrad }
    743  1.20  riastrad 
/* Returns total GB for the whole DIMM; CNL encodes size in half-GB units. */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}
    749  1.20  riastrad 
    750  1.20  riastrad static int cnl_get_dimm_width(u16 val)
    751  1.20  riastrad {
    752  1.20  riastrad 	if (cnl_get_dimm_size(val) == 0)
    753  1.20  riastrad 		return 0;
    754  1.20  riastrad 
    755  1.20  riastrad 	switch (val & CNL_DRAM_WIDTH_MASK) {
    756  1.20  riastrad 	case CNL_DRAM_WIDTH_X8:
    757  1.20  riastrad 	case CNL_DRAM_WIDTH_X16:
    758  1.20  riastrad 	case CNL_DRAM_WIDTH_X32:
    759  1.20  riastrad 		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
    760  1.20  riastrad 		return 8 << val;
    761  1.20  riastrad 	default:
    762  1.20  riastrad 		MISSING_CASE(val);
    763  1.20  riastrad 		return 0;
    764  1.20  riastrad 	}
    765  1.20  riastrad }
    766  1.20  riastrad 
    767  1.20  riastrad static int cnl_get_dimm_ranks(u16 val)
    768  1.20  riastrad {
    769  1.20  riastrad 	if (cnl_get_dimm_size(val) == 0)
    770  1.20  riastrad 		return 0;
    771  1.20  riastrad 
    772  1.20  riastrad 	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
    773  1.20  riastrad 
    774  1.20  riastrad 	return val + 1;
    775  1.20  riastrad }
    776  1.20  riastrad 
    777  1.20  riastrad static bool
    778  1.20  riastrad skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
    779  1.20  riastrad {
    780  1.20  riastrad 	/* Convert total GB to Gb per DRAM device */
    781  1.20  riastrad 	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
    782  1.20  riastrad }
    783  1.20  riastrad 
    784  1.20  riastrad static void
    785  1.20  riastrad skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
    786  1.20  riastrad 		       struct dram_dimm_info *dimm,
    787  1.20  riastrad 		       int channel, char dimm_name, u16 val)
    788  1.20  riastrad {
    789  1.20  riastrad 	if (INTEL_GEN(dev_priv) >= 10) {
    790  1.20  riastrad 		dimm->size = cnl_get_dimm_size(val);
    791  1.20  riastrad 		dimm->width = cnl_get_dimm_width(val);
    792  1.20  riastrad 		dimm->ranks = cnl_get_dimm_ranks(val);
    793  1.20  riastrad 	} else {
    794  1.20  riastrad 		dimm->size = skl_get_dimm_size(val);
    795  1.20  riastrad 		dimm->width = skl_get_dimm_width(val);
    796  1.20  riastrad 		dimm->ranks = skl_get_dimm_ranks(val);
    797  1.20  riastrad 	}
    798  1.20  riastrad 
    799  1.20  riastrad 	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
    800  1.20  riastrad 		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
    801  1.20  riastrad 		      yesno(skl_is_16gb_dimm(dimm)));
    802  1.20  riastrad }
    803  1.20  riastrad 
/*
 * Decode one memory channel's DIMM register (low 16 bits = DIMM L,
 * high 16 bits = DIMM S) into @ch.  Returns -EINVAL when neither
 * slot is populated.
 */
static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

	/*
	 * Channel rank count: any dual-rank DIMM gives two ranks, and so
	 * do two single-rank DIMMs together (one rank each) — the 1&&1
	 * case intentionally yields 2, not 1.
	 */
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}
    835  1.20  riastrad 
/*
 * Two channels are "symmetric" when their decoded info matches exactly
 * and, within channel 0, either the S slot is empty or both DIMMs match.
 * NOTE(review): the memcmp()s compare whole structs including padding;
 * this relies on both channels being zero-initialized by the caller
 * (they are, in skl_dram_get_channels_info()).
 */
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
    844  1.20  riastrad 
/*
 * Read both SKL memory-channel registers and fill in channel count,
 * rank count, 16Gb-DIMM flag and symmetry in dev_priv->dram_info.
 */
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	/* A channel that fails to decode is simply not counted. */
	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If any of the channel is single rank channel, worst case output
	 * will be same as if single rank memory, so consider single rank
	 * memory.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));
	return 0;
}
    891  1.20  riastrad 
    892  1.20  riastrad static enum intel_dram_type
    893  1.20  riastrad skl_get_dram_type(struct drm_i915_private *dev_priv)
    894  1.20  riastrad {
    895  1.20  riastrad 	u32 val;
    896  1.20  riastrad 
    897  1.20  riastrad 	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
    898  1.20  riastrad 
    899  1.20  riastrad 	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
    900  1.20  riastrad 	case SKL_DRAM_DDR_TYPE_DDR3:
    901  1.20  riastrad 		return INTEL_DRAM_DDR3;
    902  1.20  riastrad 	case SKL_DRAM_DDR_TYPE_DDR4:
    903  1.20  riastrad 		return INTEL_DRAM_DDR4;
    904  1.20  riastrad 	case SKL_DRAM_DDR_TYPE_LPDDR3:
    905  1.20  riastrad 		return INTEL_DRAM_LPDDR3;
    906  1.20  riastrad 	case SKL_DRAM_DDR_TYPE_LPDDR4:
    907  1.20  riastrad 		return INTEL_DRAM_LPDDR4;
    908  1.20  riastrad 	default:
    909  1.20  riastrad 		MISSING_CASE(val);
    910  1.20  riastrad 		return INTEL_DRAM_UNKNOWN;
    911  1.20  riastrad 	}
    912  1.20  riastrad }
    913  1.20  riastrad 
/*
 * Populate dev_priv->dram_info for SKL-class parts: DRAM type, channel
 * topology, and raw bandwidth.  Marks dram_info valid only on success.
 */
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	/* BIOS-requested memory frequency, converted to kHz. */
	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	/* Total raw bandwidth: channels * freq (kHz) * 8. */
	dram_info->bandwidth_kbps = dram_info->num_channels *
							mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}
    943  1.20  riastrad 
/* Returns Gb per DRAM device; 0 on an unrecognized size encoding. */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}
    963  1.20  riastrad 
    964  1.20  riastrad static int bxt_get_dimm_width(u32 val)
    965  1.20  riastrad {
    966  1.20  riastrad 	if (!bxt_get_dimm_size(val))
    967  1.20  riastrad 		return 0;
    968  1.20  riastrad 
    969  1.20  riastrad 	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
    970  1.20  riastrad 
    971  1.20  riastrad 	return 8 << val;
    972  1.20  riastrad }
    973  1.20  riastrad 
    974  1.20  riastrad static int bxt_get_dimm_ranks(u32 val)
    975  1.20  riastrad {
    976  1.20  riastrad 	if (!bxt_get_dimm_size(val))
    977  1.20  riastrad 		return 0;
    978  1.20  riastrad 
    979  1.20  riastrad 	switch (val & BXT_DRAM_RANK_MASK) {
    980  1.20  riastrad 	case BXT_DRAM_RANK_SINGLE:
    981  1.20  riastrad 		return 1;
    982  1.20  riastrad 	case BXT_DRAM_RANK_DUAL:
    983  1.20  riastrad 		return 2;
    984  1.20  riastrad 	default:
    985  1.20  riastrad 		MISSING_CASE(val);
    986  1.20  riastrad 		return 0;
    987  1.20  riastrad 	}
    988  1.20  riastrad }
    989  1.20  riastrad 
    990  1.20  riastrad static enum intel_dram_type bxt_get_dimm_type(u32 val)
    991  1.20  riastrad {
    992  1.20  riastrad 	if (!bxt_get_dimm_size(val))
    993  1.20  riastrad 		return INTEL_DRAM_UNKNOWN;
    994  1.20  riastrad 
    995  1.20  riastrad 	switch (val & BXT_DRAM_TYPE_MASK) {
    996  1.20  riastrad 	case BXT_DRAM_TYPE_DDR3:
    997  1.20  riastrad 		return INTEL_DRAM_DDR3;
    998  1.20  riastrad 	case BXT_DRAM_TYPE_LPDDR3:
    999  1.20  riastrad 		return INTEL_DRAM_LPDDR3;
   1000  1.20  riastrad 	case BXT_DRAM_TYPE_DDR4:
   1001  1.20  riastrad 		return INTEL_DRAM_DDR4;
   1002  1.20  riastrad 	case BXT_DRAM_TYPE_LPDDR4:
   1003  1.20  riastrad 		return INTEL_DRAM_LPDDR4;
   1004  1.20  riastrad 	default:
   1005  1.20  riastrad 		MISSING_CASE(val);
   1006  1.20  riastrad 		return INTEL_DRAM_UNKNOWN;
   1007  1.20  riastrad 	}
   1008  1.20  riastrad }
   1009  1.20  riastrad 
/* Decode one BXT DUNIT register into @dimm. */
static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	/*
	 * width and ranks must be decoded first: the size computation
	 * below calls intel_dimm_num_devices(), which reads both.
	 */
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}
   1022  1.20  riastrad 
/*
 * Populate dev_priv->dram_info for BXT/GLK (gen9 LP) parts by scanning
 * the per-DUNIT registers.  Marks dram_info valid only on success.
 */
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	/* BIOS-requested memory frequency, converted to kHz. */
	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		/* All-ones read: DUNIT not present, skip it. */
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		/* All populated DUNITs are expected to report the same type. */
		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any of the channel is single rank channel,
		 * worst case output will be same as if single rank
		 * memory, so consider single rank memory.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}
   1095  1.20  riastrad 
/*
 * Top-level DRAM probe: dispatch to the BXT or SKL decoder and log the
 * result.  On failure dram_info->valid stays false; consumers must cope.
 */
static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	/* DRAM probing is only implemented for gen9+ parts with a display. */
	if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}
   1126  1.20  riastrad 
   1127  1.20  riastrad static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
   1128  1.20  riastrad {
   1129  1.20  riastrad 	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
   1130  1.20  riastrad 	static const u8 sets[4] = { 1, 1, 2, 2 };
   1131  1.20  riastrad 
   1132  1.20  riastrad 	return EDRAM_NUM_BANKS(cap) *
   1133  1.20  riastrad 		ways[EDRAM_WAYS_IDX(cap)] *
   1134  1.20  riastrad 		sets[EDRAM_SETS_IDX(cap)];
   1135  1.20  riastrad }
   1136  1.20  riastrad 
/*
 * Detect eDRAM on HSW/BDW/gen9+ and record its size (MB) in
 * dev_priv->edram_size_mb.  Leaves the field untouched when the
 * platform has none or the capability bit is clear.
 */
static void edram_detect(struct drm_i915_private *dev_priv)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(dev_priv) ||
	      IS_BROADWELL(dev_priv) ||
	      INTEL_GEN(dev_priv) >= 9))
		return;

	/* Raw read: this runs before the forcewake/gt machinery is up. */
	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		dev_priv->edram_size_mb = 128;
	else
		dev_priv->edram_size_mb =
			gen9_edram_size_mb(dev_priv, edram_cap);

	dev_info(dev_priv->drm.dev,
		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
   1166  1.20  riastrad 
   1167  1.20  riastrad /**
   1168  1.20  riastrad  * i915_driver_hw_probe - setup state requiring device access
   1169  1.20  riastrad  * @dev_priv: device private
   1170  1.20  riastrad  *
   1171  1.20  riastrad  * Setup state that requires accessing the device, but doesn't require
   1172  1.20  riastrad  * exposing the driver via kernel internal or userspace interfaces.
   1173  1.20  riastrad  */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* Fault-injection hook used by probe-failure testing. */
	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	/* Reject vGPU hosts that cannot provide isolated per-context ppGTT. */
	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	edram_detect(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/* Kick out any conflicting firmware framebuffer driver. */
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

#ifndef __NetBSD__
	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
#endif

#ifndef __NetBSD__		/* Handled in intel_ggtt.c.  */
	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_mem_regions;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_mem_regions;
		}
	}
#endif

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

	/* Error unwind: reverse order of the setup above. */
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}
   1334  1.20  riastrad 
/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 *
 * Undoes the hardware-probe stage in reverse order: perf state first,
 * then MSI (only if it was actually enabled, since pre-gen5 parts never
 * enable it), then the PM QoS request added during probe.
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	/* MSI is skipped on pre-gen5 hardware at probe time. */
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	/* Drop the CPU DMA latency request registered in hw_probe. */
	pm_qos_remove_request(&dev_priv->pm_qos);
}
   1350  1.20  riastrad 
/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 *
 * NOTE(review): the ordering below is deliberate — sysfs before perf,
 * outputs before opregion/ACPI, hpd irqs before fbdev config — do not
 * reorder without checking each dependency called out in the comments.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);
}
   1411  1.20  riastrad 
/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 *
 * Tears down in roughly the reverse order of i915_driver_register():
 * runtime PM and power domains first so no new wakeups occur, then the
 * user-visible interfaces (fbdev, audio, poll, sysfs), and finally
 * unplugs the DRM device from userspace.
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	/* Flushes the async fbdev config queued by _register(). */
	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	/* Perf was registered after sysfs; unregister before tearing sysfs down. */
	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}
   1443  1.20  riastrad 
   1444  1.20  riastrad static void i915_welcome_messages(struct drm_i915_private *dev_priv)
   1445  1.20  riastrad {
   1446  1.20  riastrad 	if (drm_debug_enabled(DRM_UT_DRIVER)) {
   1447  1.20  riastrad 		struct drm_printer p = drm_debug_printer("i915 device info:");
   1448  1.20  riastrad 
   1449  1.20  riastrad 		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
   1450  1.20  riastrad 			   INTEL_DEVID(dev_priv),
   1451  1.20  riastrad 			   INTEL_REVID(dev_priv),
   1452  1.20  riastrad 			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
   1453  1.20  riastrad 			   intel_subplatform(RUNTIME_INFO(dev_priv),
   1454  1.20  riastrad 					     INTEL_INFO(dev_priv)->platform),
   1455  1.20  riastrad 			   INTEL_GEN(dev_priv));
   1456  1.20  riastrad 
   1457  1.20  riastrad 		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
   1458  1.20  riastrad 		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
   1459  1.20  riastrad 	}
   1460  1.20  riastrad 
   1461  1.20  riastrad 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
   1462  1.20  riastrad 		DRM_INFO("DRM_I915_DEBUG enabled\n");
   1463  1.20  riastrad 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
   1464  1.20  riastrad 		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
   1465  1.20  riastrad 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
   1466  1.20  riastrad 		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
   1467  1.20  riastrad }
   1468  1.20  riastrad 
/*
 * Allocate and minimally initialize a drm_i915_private for @pdev.
 *
 * Returns the new device on success or an ERR_PTR on failure; on the
 * drm_dev_init() error path the allocation is freed before returning.
 * The match entry's device info is copied into the write-once "constant"
 * info and the runtime device id is taken from the PCI device.
 */
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, pci_dev_dev(pdev));
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.dev_private = i915;

	/* Link the PCI device and the DRM device both ways. */
	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	/* gen must be representable in gen_mask, or feature tests break. */
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}
   1502  1.20  riastrad 
/*
 * Final teardown counterpart of i915_driver_create(): finalize the DRM
 * device, free the private structure, and clear the PCI drvdata last so
 * nothing can look up the now-freed pointer through the pci_dev.
 */
static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}
   1513  1.20  riastrad 
/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 *
 * Returns 0 on success or a negative errno; on failure every stage
 * completed so far is unwound via the goto ladder at the end.  Note the
 * NetBSD/Linux asymmetry: NetBSD attaches the PCI bus glue up front
 * (drm_pci_attach) and detaches it on the error path, while Linux
 * enables/disables the PCI device instead.
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

#ifdef __NetBSD__
	ret = drm_pci_attach(&dev_priv->drm, pdev);
	if (ret)
		goto out_destroy;
#endif

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests(test-and-exit).
	 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
		    i915_modparams.fake_lmem_start) {
			mkwrite_device_info(dev_priv)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
			mkwrite_device_info(dev_priv)->is_dgfx = true;
			GEM_BUG_ON(!HAS_LMEM(dev_priv));
			GEM_BUG_ON(!IS_DGFX(dev_priv));
		}
	}
#endif

#ifndef __NetBSD__		/* XXX done for us */
	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;
#endif

	/* Software-only init; no MMIO or wakerefs needed yet. */
	ret = i915_driver_early_probe(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	/* Hold wakeref asserts off for the whole hardware bring-up. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_detect_vgpu(dev_priv);

	ret = i915_driver_mmio_probe(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_driver_modeset_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_hw_remove(dev_priv);
	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
out_cleanup_mmio:
	i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	i915_driver_late_release(dev_priv);
out_pci_disable:
#ifndef __NetBSD__
	pci_disable_device(pdev);
out_fini:
#endif
	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
#ifdef __NetBSD__
	/* Detach the bus glue attached above before destroying the device. */
	drm_pci_detach(&dev_priv->drm);
out_destroy:
#endif
	i915_driver_destroy(dev_priv);
	return ret;
}
   1619  1.20  riastrad 
/*
 * Driver removal: unregister from userspace first, then quiesce the GPU
 * and tear down modeset, GEM, power domains and hardware state.  The
 * final memory release happens later in i915_driver_release() when the
 * last DRM reference is dropped.
 */
void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/*
	 * After unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(&i915->gt);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_gvt_driver_remove(i915);

	i915_driver_modeset_remove(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_power_domains_driver_remove(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
   1653  1.20  riastrad 
/*
 * Last-reference release callback for the DRM device: frees the memory
 * and mappings left behind after i915_driver_remove(), drops runtime PM
 * state, and (on NetBSD) detaches the PCI bus glue before destroying
 * the device structure itself.
 */
static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	/* Final runtime-PM teardown; no wakerefs may remain past here. */
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
#ifdef __NetBSD__
	drm_pci_detach(dev);
#endif
	i915_driver_destroy(dev_priv);
}
   1677  1.20  riastrad 
/*
 * Per-open DRM file hook: set up GEM state for the new client.
 * Returns 0 on success or the negative errno from i915_gem_open().
 */
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(to_i915(dev), file);
}
   1689  1.20  riastrad 
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
#ifndef __NetBSD__		/* XXX vga */
	vga_switcheroo_process_delayed_switch();
#endif
}
   1709  1.20  riastrad 
/*
 * Per-file close hook: release the client's GEM contexts and objects,
 * then free its private state after an RCU grace period (lockless
 * lookups may still hold a reference until then).
 */
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_gem_release(dev, file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}
   1722   1.7  riastrad 
   1723   1.7  riastrad static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
   1724   1.7  riastrad {
   1725  1.20  riastrad 	struct drm_device *dev = &dev_priv->drm;
   1726  1.20  riastrad 	struct intel_encoder *encoder;
   1727   1.7  riastrad 
   1728   1.7  riastrad 	drm_modeset_lock_all(dev);
   1729  1.20  riastrad 	for_each_intel_encoder(dev, encoder)
   1730  1.20  riastrad 		if (encoder->suspend)
   1731  1.20  riastrad 			encoder->suspend(encoder);
   1732   1.7  riastrad 	drm_modeset_unlock_all(dev);
   1733   1.7  riastrad }
   1734   1.7  riastrad 
   1735  1.21  riastrad #ifndef __NetBSD__		/* XXX vlv suspend/resume */
   1736   1.7  riastrad static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
   1737   1.7  riastrad 			      bool rpm_resume);
   1738  1.20  riastrad static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
   1739  1.21  riastrad #endif
   1740   1.7  riastrad 
   1741  1.20  riastrad static bool suspend_to_idle(struct drm_i915_private *dev_priv)
   1742  1.20  riastrad {
   1743  1.20  riastrad #if IS_ENABLED(CONFIG_ACPI_SLEEP)
   1744  1.20  riastrad 	if (acpi_target_system_state() < ACPI_STATE_S3)
   1745  1.20  riastrad 		return true;
   1746  1.20  riastrad #endif
   1747  1.20  riastrad 	return false;
   1748  1.20  riastrad }
   1749  1.20  riastrad 
/*
 * Pre-suspend hook run before i915_drm_suspend(): quiesce GEM so the
 * GPU is idle before display teardown begins.  Always returns 0.
 */
int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}
   1764   1.7  riastrad 
/*
 * First-phase system suspend: tear down display, interrupts and
 * encoder state, save hardware state, and notify the opregion of the
 * target power state.  The deeper power transitions happen later in
 * i915_drm_suspend_late().  Always returns 0.
 *
 * On NetBSD, pmf saves the PCI config state for us, so pci_save_state()
 * is compiled out.
 */
int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

#ifdef __NetBSD__		/* pmf handles this for us.  */
	__USE(pdev);
#else
	pci_save_state(pdev);
#endif

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	/* s2idle keeps the device in D1; full suspend targets D3cold. */
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}
   1813   1.1  riastrad 
   1814  1.20  riastrad static enum i915_drm_suspend_mode
   1815  1.20  riastrad get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
   1816  1.20  riastrad {
   1817  1.20  riastrad 	if (hibernate)
   1818  1.20  riastrad 		return I915_DRM_SUSPEND_HIBERNATE;
   1819  1.20  riastrad 
   1820  1.20  riastrad 	if (suspend_to_idle(dev_priv))
   1821  1.20  riastrad 		return I915_DRM_SUSPEND_IDLE;
   1822  1.20  riastrad 
   1823  1.20  riastrad 	return I915_DRM_SUSPEND_MEM;
   1824  1.20  riastrad }
   1825  1.20  riastrad 
/*
 * Second-phase system suspend: finish GEM suspend, quiesce the uncore,
 * and drive the power domains into the mode chosen by
 * get_suspend_mode().  Returns 0 on success or a negative errno (only
 * the VLV/CHV completion step can fail, and on failure the power
 * domains are resumed before returning).
 *
 * On NetBSD the VLV/CHV path and the PCI power-state handling are
 * compiled out (pmf performs the PCI transitions for us).
 */
int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
#ifdef __NetBSD__
		ret = 0;
#else
		ret = vlv_suspend_complete(dev_priv);
#endif

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

#ifdef __NetBSD__		/* pmf handles this for us.  */
	__USE(pdev);
#else
	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is inpractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);
#endif

out:
	enable_rpm_wakeref_asserts(rpm);
	/* Only drop runtime PM entirely if userspace holds no forcewake. */
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}
   1885   1.7  riastrad 
#ifndef __NetBSD__		/* XXX vga switcheroo */
/*
 * vga_switcheroo suspend entry point: run both suspend phases unless
 * the device has already been powered off by the switcheroo, rejecting
 * any PM event other than suspend/freeze.  Returns 0 or a negative
 * errno from the suspend phases.
 */
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	/* Not hibernating, so hibernation == false. */
	return i915_drm_suspend_late(&i915->drm, false);
}
#endif
   1905   1.1  riastrad 
/*
 * Second-phase system resume, counterpart of i915_drm_suspend():
 * restore GGTT mappings and saved state, re-enable interrupts (which
 * must precede any batch submission — see the comment below), then
 * bring modeset, display, hotplug and fbdev back up.  Always returns 0;
 * a GGTT re-enable failure is logged but not propagated.
 */
int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	i915_gem_restore_gtt_mappings(dev_priv);
	i915_gem_restore_fences(&dev_priv->ggtt);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	/* hpd_irq_setup is optional; guard under the irq lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 * */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}
   1977   1.1  riastrad 
   1978  1.13  riastrad int i915_drm_resume_early(struct drm_device *dev)
   1979   1.1  riastrad {
   1980  1.20  riastrad 	struct drm_i915_private *dev_priv = to_i915(dev);
   1981  1.38  riastrad #ifndef __NetBSD__
   1982  1.20  riastrad 	struct pci_dev *pdev = dev_priv->drm.pdev;
   1983  1.38  riastrad #endif
   1984  1.47       tnn 	int ret = 0;
   1985   1.1  riastrad 
   1986   1.3  riastrad 	/*
   1987   1.3  riastrad 	 * We have a resume ordering issue with the snd-hda driver also
   1988   1.3  riastrad 	 * requiring our device to be power up. Due to the lack of a
   1989   1.3  riastrad 	 * parent/child relationship we currently solve this with an early
   1990   1.3  riastrad 	 * resume hook.
   1991   1.3  riastrad 	 *
   1992   1.3  riastrad 	 * FIXME: This should be solved with a special hdmi sink device or
   1993   1.3  riastrad 	 * similar so that power domains can be employed.
   1994   1.3  riastrad 	 */
   1995  1.20  riastrad 
   1996  1.20  riastrad 	/*
   1997  1.20  riastrad 	 * Note that we need to set the power state explicitly, since we
   1998  1.20  riastrad 	 * powered off the device during freeze and the PCI core won't power
   1999  1.20  riastrad 	 * it back up for us during thaw. Powering off the device during
   2000  1.20  riastrad 	 * freeze is not a hard requirement though, and during the
   2001  1.20  riastrad 	 * suspend/resume phases the PCI core makes sure we get here with the
   2002  1.20  riastrad 	 * device powered on. So in case we change our freeze logic and keep
   2003  1.20  riastrad 	 * the device powered we can also remove the following set power state
   2004  1.20  riastrad 	 * call.
   2005  1.20  riastrad 	 */
   2006  1.33  riastrad #ifndef __NetBSD__		/* pmf handles this for us.  */
   2007  1.20  riastrad 	ret = pci_set_power_state(pdev, PCI_D0);
   2008  1.20  riastrad 	if (ret) {
   2009  1.20  riastrad 		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
   2010  1.20  riastrad 		return ret;
   2011  1.20  riastrad 	}
   2012  1.20  riastrad 
   2013  1.20  riastrad 	/*
   2014  1.20  riastrad 	 * Note that pci_enable_device() first enables any parent bridge
   2015  1.20  riastrad 	 * device and only then sets the power state for this device. The
   2016  1.20  riastrad 	 * bridge enabling is a nop though, since bridge devices are resumed
   2017  1.20  riastrad 	 * first. The order of enabling power and enabling the device is
   2018  1.20  riastrad 	 * imposed by the PCI core as described above, so here we preserve the
   2019  1.20  riastrad 	 * same order for the freeze/thaw phases.
   2020  1.20  riastrad 	 *
   2021  1.20  riastrad 	 * TODO: eventually we should remove pci_disable_device() /
   2022  1.20  riastrad 	 * pci_enable_enable_device() from suspend/resume. Due to how they
   2023  1.20  riastrad 	 * depend on the device enable refcount we can't anyway depend on them
   2024  1.20  riastrad 	 * disabling/enabling the device.
   2025  1.20  riastrad 	 */
   2026  1.20  riastrad 	if (pci_enable_device(pdev))
   2027   1.1  riastrad 		return -EIO;
   2028   1.4  riastrad #endif
   2029   1.1  riastrad 
   2030   1.4  riastrad 	/* XXX pmf probably handles this for us too.  */
   2031   1.1  riastrad 	pci_set_master(dev->pdev);
   2032   1.1  riastrad 
   2033  1.20  riastrad 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   2034  1.20  riastrad 
   2035  1.20  riastrad 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   2036  1.21  riastrad #ifdef __NetBSD__		/* XXX vlv suspend/resume */
   2037  1.21  riastrad 		ret = 0;
   2038  1.21  riastrad #else
   2039   1.7  riastrad 		ret = vlv_resume_prepare(dev_priv, false);
   2040  1.21  riastrad #endif
   2041   1.7  riastrad 	if (ret)
   2042   1.7  riastrad 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
   2043   1.7  riastrad 			  ret);
   2044   1.7  riastrad 
   2045  1.20  riastrad 	intel_uncore_resume_early(&dev_priv->uncore);
   2046  1.20  riastrad 
   2047  1.20  riastrad 	intel_gt_check_and_clear_faults(&dev_priv->gt);
   2048   1.7  riastrad 
   2049  1.20  riastrad 	intel_display_power_resume_early(dev_priv);
   2050   1.7  riastrad 
   2051  1.20  riastrad 	intel_power_domains_resume(dev_priv);
   2052  1.20  riastrad 
   2053  1.20  riastrad 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
   2054   1.7  riastrad 
   2055   1.7  riastrad 	return ret;
   2056   1.1  riastrad }
   2057   1.1  riastrad 
   2058  1.21  riastrad #ifndef __NetBSD__		/* XXX vga switcheroo */
   2059  1.20  riastrad int i915_resume_switcheroo(struct drm_i915_private *i915)
   2060   1.1  riastrad {
   2061   1.1  riastrad 	int ret;
   2062   1.1  riastrad 
   2063  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2064   1.7  riastrad 		return 0;
   2065   1.7  riastrad 
   2066  1.20  riastrad 	ret = i915_drm_resume_early(&i915->drm);
   2067   1.1  riastrad 	if (ret)
   2068   1.1  riastrad 		return ret;
   2069   1.1  riastrad 
   2070  1.20  riastrad 	return i915_drm_resume(&i915->drm);
   2071   1.1  riastrad }
   2072   1.1  riastrad 
   2073  1.20  riastrad static int i915_pm_prepare(struct device *kdev)
   2074   1.1  riastrad {
   2075  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2076   1.1  riastrad 
   2077  1.20  riastrad 	if (!i915) {
   2078  1.20  riastrad 		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
   2079   1.3  riastrad 		return -ENODEV;
   2080   1.3  riastrad 	}
   2081   1.1  riastrad 
   2082  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2083  1.20  riastrad 		return 0;
   2084   1.1  riastrad 
   2085  1.20  riastrad 	return i915_drm_prepare(&i915->drm);
   2086   1.1  riastrad }
   2087  1.11  riastrad #endif
   2088   1.1  riastrad 
   2089   1.8  riastrad #ifndef __NetBSD__
   2090  1.20  riastrad static int i915_pm_suspend(struct device *kdev)
   2091   1.1  riastrad {
   2092  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2093   1.1  riastrad 
   2094  1.20  riastrad 	if (!i915) {
   2095  1.20  riastrad 		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
   2096   1.1  riastrad 		return -ENODEV;
   2097   1.1  riastrad 	}
   2098   1.1  riastrad 
   2099  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2100   1.1  riastrad 		return 0;
   2101   1.1  riastrad 
   2102  1.20  riastrad 	return i915_drm_suspend(&i915->drm);
   2103   1.3  riastrad }
   2104   1.3  riastrad 
   2105  1.20  riastrad static int i915_pm_suspend_late(struct device *kdev)
   2106   1.3  riastrad {
   2107  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2108   1.3  riastrad 
   2109   1.3  riastrad 	/*
   2110   1.7  riastrad 	 * We have a suspend ordering issue with the snd-hda driver also
   2111   1.3  riastrad 	 * requiring our device to be power up. Due to the lack of a
   2112   1.3  riastrad 	 * parent/child relationship we currently solve this with an late
   2113   1.3  riastrad 	 * suspend hook.
   2114   1.3  riastrad 	 *
   2115   1.3  riastrad 	 * FIXME: This should be solved with a special hdmi sink device or
   2116   1.3  riastrad 	 * similar so that power domains can be employed.
   2117   1.3  riastrad 	 */
   2118  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2119   1.3  riastrad 		return 0;
   2120   1.1  riastrad 
   2121  1.20  riastrad 	return i915_drm_suspend_late(&i915->drm, false);
   2122   1.7  riastrad }
   2123   1.7  riastrad 
   2124  1.20  riastrad static int i915_pm_poweroff_late(struct device *kdev)
   2125   1.7  riastrad {
   2126  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2127   1.7  riastrad 
   2128  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2129   1.7  riastrad 		return 0;
   2130   1.1  riastrad 
   2131  1.20  riastrad 	return i915_drm_suspend_late(&i915->drm, true);
   2132   1.1  riastrad }
   2133   1.1  riastrad 
   2134  1.20  riastrad static int i915_pm_resume_early(struct device *kdev)
   2135   1.3  riastrad {
   2136  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2137   1.7  riastrad 
   2138  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2139   1.7  riastrad 		return 0;
   2140   1.3  riastrad 
   2141  1.20  riastrad 	return i915_drm_resume_early(&i915->drm);
   2142   1.3  riastrad }
   2143   1.3  riastrad 
   2144  1.20  riastrad static int i915_pm_resume(struct device *kdev)
   2145   1.1  riastrad {
   2146  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2147   1.7  riastrad 
   2148  1.20  riastrad 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
   2149   1.7  riastrad 		return 0;
   2150   1.7  riastrad 
   2151  1.20  riastrad 	return i915_drm_resume(&i915->drm);
   2152   1.7  riastrad }
   2153   1.7  riastrad 
   2154  1.20  riastrad /* freeze: before creating the hibernation_image */
   2155  1.20  riastrad static int i915_pm_freeze(struct device *kdev)
   2156   1.7  riastrad {
   2157  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2158  1.20  riastrad 	int ret;
   2159   1.7  riastrad 
   2160  1.20  riastrad 	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
   2161  1.20  riastrad 		ret = i915_drm_suspend(&i915->drm);
   2162  1.20  riastrad 		if (ret)
   2163  1.20  riastrad 			return ret;
   2164  1.20  riastrad 	}
   2165   1.7  riastrad 
   2166  1.20  riastrad 	ret = i915_gem_freeze(i915);
   2167  1.20  riastrad 	if (ret)
   2168  1.20  riastrad 		return ret;
   2169   1.7  riastrad 
   2170   1.7  riastrad 	return 0;
   2171   1.7  riastrad }
   2172   1.7  riastrad 
   2173  1.20  riastrad static int i915_pm_freeze_late(struct device *kdev)
   2174   1.7  riastrad {
   2175  1.20  riastrad 	struct drm_i915_private *i915 = kdev_to_i915(kdev);
   2176  1.20  riastrad 	int ret;
   2177   1.7  riastrad 
   2178  1.20  riastrad 	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
   2179  1.20  riastrad 		ret = i915_drm_suspend_late(&i915->drm, true);
   2180  1.20  riastrad 		if (ret)
   2181  1.20  riastrad 			return ret;
   2182  1.20  riastrad 	}
   2183   1.7  riastrad 
   2184  1.20  riastrad 	ret = i915_gem_freeze_late(i915);
   2185  1.20  riastrad 	if (ret)
   2186  1.20  riastrad 		return ret;
   2187   1.7  riastrad 
   2188   1.7  riastrad 	return 0;
   2189   1.7  riastrad }
   2190   1.7  riastrad 
   2191  1.20  riastrad /* thaw: called after creating the hibernation image, but before turning off. */
   2192  1.20  riastrad static int i915_pm_thaw_early(struct device *kdev)
   2193   1.7  riastrad {
   2194  1.20  riastrad 	return i915_pm_resume_early(kdev);
   2195  1.20  riastrad }
   2196   1.7  riastrad 
   2197  1.20  riastrad static int i915_pm_thaw(struct device *kdev)
   2198  1.20  riastrad {
   2199  1.20  riastrad 	return i915_pm_resume(kdev);
   2200  1.20  riastrad }
   2201   1.7  riastrad 
   2202  1.20  riastrad /* restore: called after loading the hibernation image. */
   2203  1.20  riastrad static int i915_pm_restore_early(struct device *kdev)
   2204  1.20  riastrad {
   2205  1.20  riastrad 	return i915_pm_resume_early(kdev);
   2206   1.7  riastrad }
   2207   1.7  riastrad 
   2208  1.20  riastrad static int i915_pm_restore(struct device *kdev)
   2209   1.7  riastrad {
   2210  1.20  riastrad 	return i915_pm_resume(kdev);
   2211   1.7  riastrad }
   2212   1.7  riastrad 
   2213   1.7  riastrad /*
   2214   1.7  riastrad  * Save all Gunit registers that may be lost after a D3 and a subsequent
   2215   1.7  riastrad  * S0i[R123] transition. The list of registers needing a save/restore is
   2216   1.7  riastrad  * defined in the VLV2_S0IXRegs document. This documents marks all Gunit
   2217   1.7  riastrad  * registers in the following way:
   2218   1.7  riastrad  * - Driver: saved/restored by the driver
   2219   1.7  riastrad  * - Punit : saved/restored by the Punit firmware
   2220   1.7  riastrad  * - No, w/o marking: no need to save/restore, since the register is R/O or
   2221   1.7  riastrad  *                    used internally by the HW in a way that doesn't depend
   2222   1.7  riastrad  *                    keeping the content across a suspend/resume.
   2223   1.7  riastrad  * - Debug : used for debugging
   2224   1.7  riastrad  *
   2225   1.7  riastrad  * We save/restore all registers marked with 'Driver', with the following
   2226   1.7  riastrad  * exceptions:
   2227   1.7  riastrad  * - Registers out of use, including also registers marked with 'Debug'.
   2228   1.7  riastrad  *   These have no effect on the driver's operation, so we don't save/restore
   2229   1.7  riastrad  *   them to reduce the overhead.
   2230   1.7  riastrad  * - Registers that are fully setup by an initialization function called from
   2231   1.7  riastrad  *   the resume path. For example many clock gating and RPS/RC6 registers.
   2232   1.7  riastrad  * - Registers that provide the right functionality with their reset defaults.
   2233   1.7  riastrad  *
   2234   1.7  riastrad  * TODO: Except for registers that based on the above 3 criteria can be safely
   2235   1.7  riastrad  * ignored, we save/restore all others, practically treating the HW context as
   2236   1.7  riastrad  * a black-box for the driver. Further investigation is needed to reduce the
   2237   1.7  riastrad  * saved/restored registers even further, by following the same 3 criteria.
   2238   1.7  riastrad  */
   2239   1.7  riastrad static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
   2240   1.7  riastrad {
   2241  1.20  riastrad 	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
   2242   1.7  riastrad 	int i;
   2243   1.7  riastrad 
   2244  1.20  riastrad 	if (!s)
   2245  1.20  riastrad 		return;
   2246  1.20  riastrad 
   2247   1.7  riastrad 	/* GAM 0x4000-0x4770 */
   2248   1.7  riastrad 	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
   2249   1.7  riastrad 	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
   2250   1.7  riastrad 	s->arb_mode		= I915_READ(ARB_MODE);
   2251   1.7  riastrad 	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
   2252   1.7  riastrad 	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
   2253   1.7  riastrad 
   2254   1.7  riastrad 	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
   2255   1.7  riastrad 		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
   2256   1.7  riastrad 
   2257   1.7  riastrad 	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
   2258   1.7  riastrad 	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
   2259   1.7  riastrad 
   2260   1.7  riastrad 	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
   2261   1.7  riastrad 	s->ecochk		= I915_READ(GAM_ECOCHK);
   2262   1.7  riastrad 	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
   2263   1.7  riastrad 	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
   2264   1.7  riastrad 
   2265   1.7  riastrad 	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
   2266   1.7  riastrad 
   2267   1.7  riastrad 	/* MBC 0x9024-0x91D0, 0x8500 */
   2268   1.7  riastrad 	s->g3dctl		= I915_READ(VLV_G3DCTL);
   2269   1.7  riastrad 	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
   2270   1.7  riastrad 	s->mbctl		= I915_READ(GEN6_MBCTL);
   2271   1.7  riastrad 
   2272   1.7  riastrad 	/* GCP 0x9400-0x9424, 0x8100-0x810C */
   2273   1.7  riastrad 	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
   2274   1.7  riastrad 	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
   2275   1.7  riastrad 	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
   2276   1.7  riastrad 	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
   2277   1.7  riastrad 	s->rstctl		= I915_READ(GEN6_RSTCTL);
   2278   1.7  riastrad 	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
   2279   1.7  riastrad 
   2280   1.7  riastrad 	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
   2281   1.7  riastrad 	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
   2282   1.7  riastrad 	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
   2283   1.7  riastrad 	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
   2284   1.7  riastrad 	s->ecobus		= I915_READ(ECOBUS);
   2285   1.7  riastrad 	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
   2286   1.7  riastrad 	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
   2287   1.7  riastrad 	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
   2288   1.7  riastrad 	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
   2289   1.7  riastrad 	s->rcedata		= I915_READ(VLV_RCEDATA);
   2290   1.7  riastrad 	s->spare2gh		= I915_READ(VLV_SPAREG2H);
   2291   1.7  riastrad 
   2292   1.7  riastrad 	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
   2293   1.7  riastrad 	s->gt_imr		= I915_READ(GTIMR);
   2294   1.7  riastrad 	s->gt_ier		= I915_READ(GTIER);
   2295   1.7  riastrad 	s->pm_imr		= I915_READ(GEN6_PMIMR);
   2296   1.7  riastrad 	s->pm_ier		= I915_READ(GEN6_PMIER);
   2297   1.7  riastrad 
   2298   1.7  riastrad 	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
   2299   1.7  riastrad 		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
   2300   1.7  riastrad 
   2301   1.7  riastrad 	/* GT SA CZ domain, 0x100000-0x138124 */
   2302   1.7  riastrad 	s->tilectl		= I915_READ(TILECTL);
   2303   1.7  riastrad 	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
   2304   1.7  riastrad 	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
   2305   1.7  riastrad 	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
   2306   1.7  riastrad 	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
   2307   1.7  riastrad 
   2308   1.7  riastrad 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
   2309   1.7  riastrad 	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
   2310   1.7  riastrad 	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
   2311   1.7  riastrad 	s->pcbr			= I915_READ(VLV_PCBR);
   2312   1.7  riastrad 	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
   2313   1.7  riastrad 
   2314   1.7  riastrad 	/*
   2315   1.7  riastrad 	 * Not saving any of:
   2316   1.7  riastrad 	 * DFT,		0x9800-0x9EC0
   2317   1.7  riastrad 	 * SARB,	0xB000-0xB1FC
   2318   1.7  riastrad 	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
   2319   1.7  riastrad 	 * PCI CFG
   2320   1.7  riastrad 	 */
   2321   1.7  riastrad }
   2322   1.7  riastrad 
   2323   1.7  riastrad static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
   2324   1.7  riastrad {
   2325  1.20  riastrad 	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
   2326   1.7  riastrad 	u32 val;
   2327   1.7  riastrad 	int i;
   2328   1.7  riastrad 
   2329  1.20  riastrad 	if (!s)
   2330  1.20  riastrad 		return;
   2331  1.20  riastrad 
   2332   1.7  riastrad 	/* GAM 0x4000-0x4770 */
   2333   1.7  riastrad 	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
   2334   1.7  riastrad 	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
   2335   1.7  riastrad 	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
   2336   1.7  riastrad 	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
   2337   1.7  riastrad 	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
   2338   1.7  riastrad 
   2339   1.7  riastrad 	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
   2340   1.7  riastrad 		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
   2341   1.7  riastrad 
   2342   1.7  riastrad 	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
   2343   1.7  riastrad 	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
   2344   1.7  riastrad 
   2345   1.7  riastrad 	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
   2346   1.7  riastrad 	I915_WRITE(GAM_ECOCHK,		s->ecochk);
   2347   1.7  riastrad 	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
   2348   1.7  riastrad 	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
   2349   1.7  riastrad 
   2350   1.7  riastrad 	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
   2351   1.7  riastrad 
   2352   1.7  riastrad 	/* MBC 0x9024-0x91D0, 0x8500 */
   2353   1.7  riastrad 	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
   2354   1.7  riastrad 	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
   2355   1.7  riastrad 	I915_WRITE(GEN6_MBCTL,		s->mbctl);
   2356   1.7  riastrad 
   2357   1.7  riastrad 	/* GCP 0x9400-0x9424, 0x8100-0x810C */
   2358   1.7  riastrad 	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
   2359   1.7  riastrad 	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
   2360   1.7  riastrad 	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
   2361   1.7  riastrad 	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
   2362   1.7  riastrad 	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
   2363   1.7  riastrad 	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
   2364   1.7  riastrad 
   2365   1.7  riastrad 	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
   2366   1.7  riastrad 	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
   2367   1.7  riastrad 	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
   2368   1.7  riastrad 	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
   2369   1.7  riastrad 	I915_WRITE(ECOBUS,		s->ecobus);
   2370   1.7  riastrad 	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
   2371   1.7  riastrad 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
   2372   1.7  riastrad 	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
   2373   1.7  riastrad 	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
   2374   1.7  riastrad 	I915_WRITE(VLV_RCEDATA,		s->rcedata);
   2375   1.7  riastrad 	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
   2376   1.7  riastrad 
   2377   1.7  riastrad 	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
   2378   1.7  riastrad 	I915_WRITE(GTIMR,		s->gt_imr);
   2379   1.7  riastrad 	I915_WRITE(GTIER,		s->gt_ier);
   2380   1.7  riastrad 	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
   2381   1.7  riastrad 	I915_WRITE(GEN6_PMIER,		s->pm_ier);
   2382   1.7  riastrad 
   2383   1.7  riastrad 	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
   2384   1.7  riastrad 		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
   2385   1.7  riastrad 
   2386   1.7  riastrad 	/* GT SA CZ domain, 0x100000-0x138124 */
   2387   1.7  riastrad 	I915_WRITE(TILECTL,			s->tilectl);
   2388   1.7  riastrad 	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
   2389   1.7  riastrad 	/*
   2390   1.7  riastrad 	 * Preserve the GT allow wake and GFX force clock bit, they are not
   2391   1.7  riastrad 	 * be restored, as they are used to control the s0ix suspend/resume
   2392   1.7  riastrad 	 * sequence by the caller.
   2393   1.7  riastrad 	 */
   2394   1.7  riastrad 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
   2395   1.7  riastrad 	val &= VLV_GTLC_ALLOWWAKEREQ;
   2396   1.7  riastrad 	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
   2397   1.7  riastrad 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
   2398   1.7  riastrad 
   2399   1.7  riastrad 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
   2400   1.7  riastrad 	val &= VLV_GFX_CLK_FORCE_ON_BIT;
   2401   1.7  riastrad 	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
   2402   1.7  riastrad 	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
   2403   1.7  riastrad 
   2404   1.7  riastrad 	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
   2405   1.1  riastrad 
   2406   1.7  riastrad 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
   2407   1.7  riastrad 	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
   2408   1.7  riastrad 	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
   2409   1.7  riastrad 	I915_WRITE(VLV_PCBR,			s->pcbr);
   2410   1.7  riastrad 	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
   2411   1.1  riastrad }
   2412   1.1  riastrad 
   2413  1.20  riastrad static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
   2414  1.20  riastrad 				  u32 mask, u32 val)
   2415  1.20  riastrad {
   2416  1.20  riastrad 	i915_reg_t reg = VLV_GTLC_PW_STATUS;
   2417  1.20  riastrad 	u32 reg_value;
   2418  1.20  riastrad 	int ret;
   2419  1.20  riastrad 
   2420  1.20  riastrad 	/* The HW does not like us polling for PW_STATUS frequently, so
   2421  1.20  riastrad 	 * use the sleeping loop rather than risk the busy spin within
   2422  1.20  riastrad 	 * intel_wait_for_register().
   2423  1.20  riastrad 	 *
   2424  1.20  riastrad 	 * Transitioning between RC6 states should be at most 2ms (see
   2425  1.20  riastrad 	 * valleyview_enable_rps) so use a 3ms timeout.
   2426  1.20  riastrad 	 */
   2427  1.20  riastrad 	ret = wait_for(((reg_value =
   2428  1.20  riastrad 			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
   2429  1.20  riastrad 		       == val, 3);
   2430  1.20  riastrad 
   2431  1.20  riastrad 	/* just trace the final value */
   2432  1.20  riastrad 	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
   2433  1.20  riastrad 
   2434  1.20  riastrad 	return ret;
   2435  1.20  riastrad }
   2436  1.33  riastrad #endif
   2437  1.20  riastrad 
   2438   1.7  riastrad int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
   2439   1.1  riastrad {
   2440   1.7  riastrad 	u32 val;
   2441   1.7  riastrad 	int err;
   2442   1.7  riastrad 
   2443   1.7  riastrad 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
   2444   1.7  riastrad 	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
   2445   1.7  riastrad 	if (force_on)
   2446   1.7  riastrad 		val |= VLV_GFX_CLK_FORCE_ON_BIT;
   2447   1.7  riastrad 	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
   2448   1.7  riastrad 
   2449   1.7  riastrad 	if (!force_on)
   2450   1.7  riastrad 		return 0;
   2451   1.7  riastrad 
   2452  1.20  riastrad 	err = intel_wait_for_register(&dev_priv->uncore,
   2453  1.20  riastrad 				      VLV_GTLC_SURVIVABILITY_REG,
   2454  1.20  riastrad 				      VLV_GFX_CLK_STATUS_BIT,
   2455  1.20  riastrad 				      VLV_GFX_CLK_STATUS_BIT,
   2456  1.20  riastrad 				      20);
   2457   1.7  riastrad 	if (err)
   2458   1.7  riastrad 		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
   2459   1.7  riastrad 			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
   2460   1.7  riastrad 
   2461   1.7  riastrad 	return err;
   2462   1.7  riastrad }
   2463   1.7  riastrad 
   2464  1.33  riastrad #ifndef __NetBSD__		/* XXX vlv suspend/resume */
   2465   1.7  riastrad static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
   2466   1.7  riastrad {
   2467  1.20  riastrad 	u32 mask;
   2468   1.7  riastrad 	u32 val;
   2469  1.20  riastrad 	int err;
   2470   1.7  riastrad 
   2471   1.7  riastrad 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
   2472   1.7  riastrad 	val &= ~VLV_GTLC_ALLOWWAKEREQ;
   2473   1.7  riastrad 	if (allow)
   2474   1.7  riastrad 		val |= VLV_GTLC_ALLOWWAKEREQ;
   2475   1.7  riastrad 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
   2476   1.7  riastrad 	POSTING_READ(VLV_GTLC_WAKE_CTRL);
   2477   1.7  riastrad 
   2478  1.20  riastrad 	mask = VLV_GTLC_ALLOWWAKEACK;
   2479  1.20  riastrad 	val = allow ? mask : 0;
   2480  1.20  riastrad 
   2481  1.20  riastrad 	err = vlv_wait_for_pw_status(dev_priv, mask, val);
   2482   1.7  riastrad 	if (err)
   2483   1.7  riastrad 		DRM_ERROR("timeout disabling GT waking\n");
   2484  1.20  riastrad 
   2485   1.7  riastrad 	return err;
   2486   1.7  riastrad }
   2487   1.7  riastrad 
   2488  1.20  riastrad static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
   2489  1.20  riastrad 				  bool wait_for_on)
   2490   1.7  riastrad {
   2491   1.7  riastrad 	u32 mask;
   2492   1.7  riastrad 	u32 val;
   2493   1.7  riastrad 
   2494   1.7  riastrad 	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
   2495   1.7  riastrad 	val = wait_for_on ? mask : 0;
   2496   1.1  riastrad 
   2497   1.7  riastrad 	/*
   2498   1.7  riastrad 	 * RC6 transitioning can be delayed up to 2 msec (see
   2499   1.7  riastrad 	 * valleyview_enable_rps), use 3 msec for safety.
   2500  1.20  riastrad 	 *
   2501  1.20  riastrad 	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
   2502  1.20  riastrad 	 * reset and we are trying to force the machine to sleep.
   2503   1.7  riastrad 	 */
   2504  1.20  riastrad 	if (vlv_wait_for_pw_status(dev_priv, mask, val))
   2505  1.20  riastrad 		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
   2506  1.20  riastrad 				 onoff(wait_for_on));
   2507   1.1  riastrad }
   2508   1.1  riastrad 
   2509   1.7  riastrad static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
   2510   1.3  riastrad {
   2511   1.7  riastrad 	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
   2512   1.7  riastrad 		return;
   2513   1.3  riastrad 
   2514  1.20  riastrad 	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
   2515   1.7  riastrad 	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
   2516   1.3  riastrad }
   2517   1.3  riastrad 
   2518   1.7  riastrad static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
   2519   1.1  riastrad {
   2520   1.7  riastrad 	u32 mask;
   2521   1.7  riastrad 	int err;
   2522   1.7  riastrad 
   2523   1.7  riastrad 	/*
   2524   1.7  riastrad 	 * Bspec defines the following GT well on flags as debug only, so
   2525   1.7  riastrad 	 * don't treat them as hard failures.
   2526   1.7  riastrad 	 */
   2527  1.20  riastrad 	vlv_wait_for_gt_wells(dev_priv, false);
   2528   1.7  riastrad 
   2529   1.7  riastrad 	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
   2530   1.7  riastrad 	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
   2531   1.7  riastrad 
   2532   1.7  riastrad 	vlv_check_no_gt_access(dev_priv);
   2533   1.7  riastrad 
   2534   1.7  riastrad 	err = vlv_force_gfx_clock(dev_priv, true);
   2535   1.7  riastrad 	if (err)
   2536   1.7  riastrad 		goto err1;
   2537   1.7  riastrad 
   2538   1.7  riastrad 	err = vlv_allow_gt_wake(dev_priv, false);
   2539   1.7  riastrad 	if (err)
   2540   1.7  riastrad 		goto err2;
   2541   1.7  riastrad 
   2542  1.20  riastrad 	vlv_save_gunit_s0ix_state(dev_priv);
   2543   1.7  riastrad 
   2544   1.7  riastrad 	err = vlv_force_gfx_clock(dev_priv, false);
   2545   1.7  riastrad 	if (err)
   2546   1.7  riastrad 		goto err2;
   2547   1.7  riastrad 
   2548   1.7  riastrad 	return 0;
   2549   1.7  riastrad 
   2550   1.7  riastrad err2:
   2551   1.7  riastrad 	/* For safety always re-enable waking and disable gfx clock forcing */
   2552   1.7  riastrad 	vlv_allow_gt_wake(dev_priv, true);
   2553   1.7  riastrad err1:
   2554   1.7  riastrad 	vlv_force_gfx_clock(dev_priv, false);
   2555   1.1  riastrad 
   2556   1.7  riastrad 	return err;
   2557   1.1  riastrad }
   2558   1.1  riastrad 
   2559   1.7  riastrad static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
   2560   1.7  riastrad 				bool rpm_resume)
   2561   1.1  riastrad {
   2562   1.7  riastrad 	int err;
   2563   1.7  riastrad 	int ret;
   2564   1.7  riastrad 
   2565   1.7  riastrad 	/*
   2566   1.7  riastrad 	 * If any of the steps fail just try to continue, that's the best we
   2567   1.7  riastrad 	 * can do at this point. Return the first error code (which will also
   2568   1.7  riastrad 	 * leave RPM permanently disabled).
   2569   1.7  riastrad 	 */
   2570   1.7  riastrad 	ret = vlv_force_gfx_clock(dev_priv, true);
   2571   1.7  riastrad 
   2572  1.20  riastrad 	vlv_restore_gunit_s0ix_state(dev_priv);
   2573   1.7  riastrad 
   2574   1.7  riastrad 	err = vlv_allow_gt_wake(dev_priv, true);
   2575   1.7  riastrad 	if (!ret)
   2576   1.7  riastrad 		ret = err;
   2577   1.7  riastrad 
   2578   1.7  riastrad 	err = vlv_force_gfx_clock(dev_priv, false);
   2579   1.7  riastrad 	if (!ret)
   2580   1.7  riastrad 		ret = err;
   2581   1.7  riastrad 
   2582   1.7  riastrad 	vlv_check_no_gt_access(dev_priv);
   2583   1.7  riastrad 
   2584  1.20  riastrad 	if (rpm_resume)
   2585  1.20  riastrad 		intel_init_clock_gating(dev_priv);
   2586   1.1  riastrad 
   2587   1.7  riastrad 	return ret;
   2588   1.1  riastrad }
   2589  1.33  riastrad #endif
   2590   1.1  riastrad 
   2591  1.11  riastrad #ifndef __NetBSD__		/* XXX runtime pm */
   2592  1.20  riastrad static int intel_runtime_suspend(struct device *kdev)
   2593   1.3  riastrad {
   2594  1.20  riastrad 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
   2595  1.20  riastrad 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
   2596  1.20  riastrad 	int ret = 0;
   2597   1.3  riastrad 
   2598  1.20  riastrad 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
   2599   1.7  riastrad 		return -ENODEV;
   2600   1.3  riastrad 
   2601   1.3  riastrad 	DRM_DEBUG_KMS("Suspending device\n");
   2602   1.3  riastrad 
   2603  1.20  riastrad 	disable_rpm_wakeref_asserts(rpm);
   2604   1.3  riastrad 
   2605   1.7  riastrad 	/*
   2606   1.7  riastrad 	 * We are safe here against re-faults, since the fault handler takes
   2607   1.7  riastrad 	 * an RPM reference.
   2608   1.7  riastrad 	 */
   2609  1.20  riastrad 	i915_gem_runtime_suspend(dev_priv);
   2610   1.3  riastrad 
   2611  1.20  riastrad 	intel_gt_runtime_suspend(&dev_priv->gt);
   2612   1.7  riastrad 
   2613   1.7  riastrad 	intel_runtime_pm_disable_interrupts(dev_priv);
   2614   1.7  riastrad 
   2615  1.20  riastrad 	intel_uncore_suspend(&dev_priv->uncore);
   2616  1.20  riastrad 
   2617  1.20  riastrad 	intel_display_power_suspend(dev_priv);
   2618  1.20  riastrad 
   2619  1.20  riastrad 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   2620  1.21  riastrad #ifndef __NetBSD__		/* XXX vlv suspend/resume */
   2621  1.20  riastrad 		ret = vlv_suspend_complete(dev_priv);
   2622  1.21  riastrad #endif
   2623  1.20  riastrad 
   2624   1.7  riastrad 	if (ret) {
   2625   1.7  riastrad 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
   2626  1.20  riastrad 		intel_uncore_runtime_resume(&dev_priv->uncore);
   2627  1.20  riastrad 
   2628   1.7  riastrad 		intel_runtime_pm_enable_interrupts(dev_priv);
   2629   1.7  riastrad 
   2630  1.20  riastrad 		intel_gt_runtime_resume(&dev_priv->gt);
   2631  1.20  riastrad 
   2632  1.20  riastrad 		i915_gem_restore_fences(&dev_priv->ggtt);
   2633  1.20  riastrad 
   2634  1.20  riastrad 		enable_rpm_wakeref_asserts(rpm);
   2635  1.20  riastrad 
   2636   1.7  riastrad 		return ret;
   2637   1.7  riastrad 	}
   2638   1.7  riastrad 
   2639  1.20  riastrad 	enable_rpm_wakeref_asserts(rpm);
   2640  1.20  riastrad 	intel_runtime_pm_driver_release(rpm);
   2641  1.20  riastrad 
   2642  1.20  riastrad 	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
   2643  1.20  riastrad 		DRM_ERROR("Unclaimed access detected prior to suspending\n");
   2644  1.20  riastrad 
   2645  1.20  riastrad 	rpm->suspended = true;
   2646   1.3  riastrad 
   2647   1.3  riastrad 	/*
   2648   1.7  riastrad 	 * FIXME: We really should find a document that references the arguments
   2649   1.7  riastrad 	 * used below!
   2650   1.3  riastrad 	 */
   2651  1.20  riastrad 	if (IS_BROADWELL(dev_priv)) {
   2652   1.7  riastrad 		/*
   2653   1.7  riastrad 		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
   2654   1.7  riastrad 		 * being detected, and the call we do at intel_runtime_resume()
   2655   1.7  riastrad 		 * won't be able to restore them. Since PCI_D3hot matches the
   2656   1.7  riastrad 		 * actual specification and appears to be working, use it.
   2657   1.7  riastrad 		 */
   2658  1.20  riastrad 		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
   2659   1.7  riastrad 	} else {
   2660   1.7  riastrad 		/*
   2661   1.7  riastrad 		 * current versions of firmware which depend on this opregion
   2662   1.7  riastrad 		 * notification have repurposed the D1 definition to mean
   2663   1.7  riastrad 		 * "runtime suspended" vs. what you would normally expect (D3)
   2664   1.7  riastrad 		 * to distinguish it from notifications that might be sent via
   2665   1.7  riastrad 		 * the suspend path.
   2666   1.7  riastrad 		 */
   2667  1.20  riastrad 		intel_opregion_notify_adapter(dev_priv, PCI_D1);
   2668   1.7  riastrad 	}
   2669   1.7  riastrad 
   2670  1.20  riastrad 	assert_forcewakes_inactive(&dev_priv->uncore);
   2671  1.20  riastrad 
   2672  1.20  riastrad 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
   2673  1.20  riastrad 		intel_hpd_poll_init(dev_priv);
   2674   1.3  riastrad 
   2675   1.3  riastrad 	DRM_DEBUG_KMS("Device suspended\n");
   2676   1.3  riastrad 	return 0;
   2677   1.3  riastrad }
   2678   1.3  riastrad 
   2679  1.20  riastrad static int intel_runtime_resume(struct device *kdev)
   2680   1.3  riastrad {
   2681  1.20  riastrad 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
   2682  1.20  riastrad 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
   2683   1.7  riastrad 	int ret = 0;
   2684   1.3  riastrad 
   2685  1.20  riastrad 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
   2686   1.7  riastrad 		return -ENODEV;
   2687   1.3  riastrad 
   2688   1.3  riastrad 	DRM_DEBUG_KMS("Resuming device\n");
   2689   1.3  riastrad 
   2690  1.20  riastrad 	WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
   2691  1.20  riastrad 	disable_rpm_wakeref_asserts(rpm);
   2692   1.3  riastrad 
   2693  1.20  riastrad 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
   2694  1.20  riastrad 	rpm->suspended = false;
   2695  1.20  riastrad 	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
   2696  1.20  riastrad 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
   2697   1.7  riastrad 
   2698  1.20  riastrad 	intel_display_power_resume(dev_priv);
   2699   1.7  riastrad 
   2700  1.20  riastrad 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   2701   1.7  riastrad 		ret = vlv_resume_prepare(dev_priv, true);
   2702   1.7  riastrad 
   2703  1.20  riastrad 	intel_uncore_runtime_resume(&dev_priv->uncore);
   2704  1.20  riastrad 
   2705  1.20  riastrad 	intel_runtime_pm_enable_interrupts(dev_priv);
   2706  1.20  riastrad 
   2707   1.7  riastrad 	/*
   2708   1.7  riastrad 	 * No point of rolling back things in case of an error, as the best
   2709   1.7  riastrad 	 * we can do is to hope that things will still work (and disable RPM).
   2710   1.7  riastrad 	 */
   2711  1.20  riastrad 	intel_gt_runtime_resume(&dev_priv->gt);
   2712  1.20  riastrad 	i915_gem_restore_fences(&dev_priv->ggtt);
   2713   1.7  riastrad 
   2714   1.7  riastrad 	/*
   2715   1.7  riastrad 	 * On VLV/CHV display interrupts are part of the display
   2716   1.7  riastrad 	 * power well, so hpd is reinitialized from there. For
   2717   1.7  riastrad 	 * everyone else do it here.
   2718   1.7  riastrad 	 */
   2719  1.20  riastrad 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
   2720   1.7  riastrad 		intel_hpd_init(dev_priv);
   2721   1.7  riastrad 
   2722  1.20  riastrad 	intel_enable_ipc(dev_priv);
   2723  1.20  riastrad 
   2724  1.20  riastrad 	enable_rpm_wakeref_asserts(rpm);
   2725   1.7  riastrad 
   2726   1.7  riastrad 	if (ret)
   2727   1.7  riastrad 		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
   2728   1.7  riastrad 	else
   2729   1.7  riastrad 		DRM_DEBUG_KMS("Device resumed\n");
   2730   1.3  riastrad 
   2731   1.7  riastrad 	return ret;
   2732   1.7  riastrad }
   2733  1.11  riastrad #endif
   2734  1.10  riastrad 
   2735  1.10  riastrad #ifndef __NetBSD__
   2736  1.20  riastrad const struct dev_pm_ops i915_pm_ops = {
   2737   1.7  riastrad 	/*
   2738   1.7  riastrad 	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
   2739   1.7  riastrad 	 * PMSG_RESUME]
   2740   1.7  riastrad 	 */
   2741  1.20  riastrad 	.prepare = i915_pm_prepare,
   2742   1.1  riastrad 	.suspend = i915_pm_suspend,
   2743   1.3  riastrad 	.suspend_late = i915_pm_suspend_late,
   2744   1.3  riastrad 	.resume_early = i915_pm_resume_early,
   2745   1.1  riastrad 	.resume = i915_pm_resume,
   2746   1.7  riastrad 
   2747   1.7  riastrad 	/*
   2748   1.7  riastrad 	 * S4 event handlers
   2749   1.7  riastrad 	 * @freeze, @freeze_late    : called (1) before creating the
   2750   1.7  riastrad 	 *                            hibernation image [PMSG_FREEZE] and
   2751   1.7  riastrad 	 *                            (2) after rebooting, before restoring
   2752   1.7  riastrad 	 *                            the image [PMSG_QUIESCE]
   2753   1.7  riastrad 	 * @thaw, @thaw_early       : called (1) after creating the hibernation
   2754   1.7  riastrad 	 *                            image, before writing it [PMSG_THAW]
   2755   1.7  riastrad 	 *                            and (2) after failing to create or
   2756   1.7  riastrad 	 *                            restore the image [PMSG_RECOVER]
   2757   1.7  riastrad 	 * @poweroff, @poweroff_late: called after writing the hibernation
   2758   1.7  riastrad 	 *                            image, before rebooting [PMSG_HIBERNATE]
   2759   1.7  riastrad 	 * @restore, @restore_early : called after rebooting and restoring the
   2760   1.7  riastrad 	 *                            hibernation image [PMSG_RESTORE]
   2761   1.7  riastrad 	 */
   2762  1.20  riastrad 	.freeze = i915_pm_freeze,
   2763  1.20  riastrad 	.freeze_late = i915_pm_freeze_late,
   2764  1.20  riastrad 	.thaw_early = i915_pm_thaw_early,
   2765  1.20  riastrad 	.thaw = i915_pm_thaw,
   2766   1.7  riastrad 	.poweroff = i915_pm_suspend,
   2767   1.7  riastrad 	.poweroff_late = i915_pm_poweroff_late,
   2768  1.20  riastrad 	.restore_early = i915_pm_restore_early,
   2769  1.20  riastrad 	.restore = i915_pm_restore,
   2770   1.7  riastrad 
   2771   1.7  riastrad 	/* S0ix (via runtime suspend) event handlers */
   2772   1.7  riastrad 	.runtime_suspend = intel_runtime_suspend,
   2773   1.7  riastrad 	.runtime_resume = intel_runtime_resume,
   2774   1.1  riastrad };
   2775   1.1  riastrad 
   2776   1.1  riastrad static const struct file_operations i915_driver_fops = {
   2777   1.1  riastrad 	.owner = THIS_MODULE,
   2778   1.1  riastrad 	.open = drm_open,
   2779   1.1  riastrad 	.release = drm_release,
   2780   1.1  riastrad 	.unlocked_ioctl = drm_ioctl,
   2781  1.20  riastrad 	.mmap = i915_gem_mmap,
   2782   1.1  riastrad 	.poll = drm_poll,
   2783   1.1  riastrad 	.read = drm_read,
   2784   1.1  riastrad 	.compat_ioctl = i915_compat_ioctl,
   2785   1.1  riastrad 	.llseek = noop_llseek,
   2786   1.1  riastrad };
   2787  1.20  riastrad #endif	/* defined(__NetBSD__) */
   2788   1.1  riastrad 
   2789  1.20  riastrad static int
   2790  1.20  riastrad i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
   2791  1.20  riastrad 			  struct drm_file *file)
   2792  1.20  riastrad {
   2793  1.20  riastrad 	return -ENODEV;
   2794  1.20  riastrad }
   2795  1.20  riastrad 
   2796  1.20  riastrad static const struct drm_ioctl_desc i915_ioctls[] = {
   2797  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2798  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
   2799  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
   2800  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
   2801  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
   2802  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
   2803  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
   2804  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2805  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
   2806  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
   2807  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2808  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
   2809  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2810  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2811  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
   2812  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
   2813  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2814  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2815  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
   2816  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
   2817  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
   2818  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
   2819  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
   2820  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
   2821  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
   2822  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
   2823  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2824  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
   2825  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
   2826  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
   2827  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
   2828  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
   2829  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
   2830  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
   2831  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
   2832  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
   2833  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
   2834  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
   2835  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
   2836  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
   2837  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
   2838  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
   2839  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
   2840  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
   2841  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
   2842  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
   2843  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
   2844  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
   2845  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
   2846  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
   2847  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
   2848  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
   2849  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
   2850  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
   2851  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
   2852  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
   2853  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
   2854  1.20  riastrad 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
   2855  1.20  riastrad };
   2856   1.2  riastrad 
   2857   1.1  riastrad static struct drm_driver driver = {
   2858   1.1  riastrad 	/* Don't use MTRRs here; the Xserver or userspace app should
   2859   1.1  riastrad 	 * deal with them for Intel hardware.
   2860   1.1  riastrad 	 */
   2861   1.1  riastrad 	.driver_features =
   2862  1.20  riastrad 	    DRIVER_GEM |
   2863  1.20  riastrad 	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
   2864  1.20  riastrad 	.release = i915_driver_release,
   2865   1.1  riastrad 	.open = i915_driver_open,
   2866   1.1  riastrad 	.lastclose = i915_driver_lastclose,
   2867   1.1  riastrad 	.postclose = i915_driver_postclose,
   2868  1.20  riastrad 
   2869  1.20  riastrad 	.gem_close_object = i915_gem_close_object,
   2870  1.20  riastrad 	.gem_free_object_unlocked = i915_gem_free_object,
   2871   1.9  riastrad #ifdef __NetBSD__
   2872   1.9  riastrad 	.request_irq = drm_pci_request_irq,
   2873   1.9  riastrad 	.free_irq = drm_pci_free_irq,
   2874   1.1  riastrad 
   2875  1.40  riastrad 	.mmap_object = &i915_gem_mmap_object,
   2876  1.43  riastrad 	.gem_uvm_ops = &i915_gem_uvm_ops,
   2877   1.2  riastrad #endif
   2878   1.1  riastrad 
   2879  1.20  riastrad 
   2880   1.1  riastrad 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
   2881   1.1  riastrad 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
   2882   1.1  riastrad 	.gem_prime_export = i915_gem_prime_export,
   2883   1.1  riastrad 	.gem_prime_import = i915_gem_prime_import,
   2884   1.1  riastrad 
   2885  1.20  riastrad 	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
   2886  1.20  riastrad 	.get_scanout_position = i915_get_crtc_scanoutpos,
   2887  1.20  riastrad 
   2888   1.1  riastrad 	.dumb_create = i915_gem_dumb_create,
   2889  1.20  riastrad 	.dumb_map_offset = i915_gem_dumb_mmap_offset,
   2890  1.20  riastrad 
   2891   1.1  riastrad 	.ioctls = i915_ioctls,
   2892  1.20  riastrad 	.num_ioctls = ARRAY_SIZE(i915_ioctls),
   2893   1.2  riastrad #ifdef __NetBSD__
   2894   1.2  riastrad 	.fops = NULL,
   2895   1.2  riastrad #else
   2896   1.1  riastrad 	.fops = &i915_driver_fops,
   2897   1.2  riastrad #endif
   2898   1.1  riastrad 	.name = DRIVER_NAME,
   2899   1.1  riastrad 	.desc = DRIVER_DESC,
   2900   1.1  riastrad 	.date = DRIVER_DATE,
   2901   1.1  riastrad 	.major = DRIVER_MAJOR,
   2902   1.1  riastrad 	.minor = DRIVER_MINOR,
   2903   1.1  riastrad 	.patchlevel = DRIVER_PATCHLEVEL,
   2904   1.1  riastrad };
   2905