Home | History | Annotate | Line # | Download | only in amdgpu
      1  1.21  riastrad /*	$NetBSD: amdgpu_device.c,v 1.21 2024/07/01 12:09:52 riastradh Exp $	*/
      2   1.1  riastrad 
      3   1.1  riastrad /*
      4   1.1  riastrad  * Copyright 2008 Advanced Micro Devices, Inc.
      5   1.1  riastrad  * Copyright 2008 Red Hat Inc.
      6   1.1  riastrad  * Copyright 2009 Jerome Glisse.
      7   1.1  riastrad  *
      8   1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      9   1.1  riastrad  * copy of this software and associated documentation files (the "Software"),
     10   1.1  riastrad  * to deal in the Software without restriction, including without limitation
     11   1.1  riastrad  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12   1.1  riastrad  * and/or sell copies of the Software, and to permit persons to whom the
     13   1.1  riastrad  * Software is furnished to do so, subject to the following conditions:
     14   1.1  riastrad  *
     15   1.1  riastrad  * The above copyright notice and this permission notice shall be included in
     16   1.1  riastrad  * all copies or substantial portions of the Software.
     17   1.1  riastrad  *
     18   1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19   1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20   1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21   1.1  riastrad  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22   1.1  riastrad  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23   1.1  riastrad  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24   1.1  riastrad  * OTHER DEALINGS IN THE SOFTWARE.
     25   1.1  riastrad  *
     26   1.1  riastrad  * Authors: Dave Airlie
     27   1.1  riastrad  *          Alex Deucher
     28   1.1  riastrad  *          Jerome Glisse
     29   1.1  riastrad  */
     30   1.1  riastrad #include <sys/cdefs.h>
     31  1.21  riastrad __KERNEL_RCSID(0, "$NetBSD: amdgpu_device.c,v 1.21 2024/07/01 12:09:52 riastradh Exp $");
     32   1.1  riastrad 
     33   1.7  riastrad #include <linux/power_supply.h>
     34   1.7  riastrad #include <linux/kthread.h>
     35   1.7  riastrad #include <linux/module.h>
     36   1.1  riastrad #include <linux/console.h>
     37   1.1  riastrad #include <linux/slab.h>
     38  1.10  riastrad #include <linux/reboot.h>
     39   1.7  riastrad 
     40   1.7  riastrad #include <drm/drm_atomic_helper.h>
     41   1.7  riastrad #include <drm/drm_probe_helper.h>
     42   1.1  riastrad #include <drm/amdgpu_drm.h>
     43   1.1  riastrad #include <linux/vgaarb.h>
     44   1.1  riastrad #include <linux/vga_switcheroo.h>
     45   1.1  riastrad #include <linux/efi.h>
     46   1.1  riastrad #include "amdgpu.h"
     47   1.7  riastrad #include "amdgpu_trace.h"
     48   1.1  riastrad #include "amdgpu_i2c.h"
     49   1.1  riastrad #include "atom.h"
     50   1.1  riastrad #include "amdgpu_atombios.h"
     51   1.7  riastrad #include "amdgpu_atomfirmware.h"
     52   1.7  riastrad #include "amd_pcie.h"
     53   1.7  riastrad #ifdef CONFIG_DRM_AMDGPU_SI
     54   1.7  riastrad #include "si.h"
     55   1.7  riastrad #endif
     56   1.1  riastrad #ifdef CONFIG_DRM_AMDGPU_CIK
     57   1.1  riastrad #include "cik.h"
     58   1.1  riastrad #endif
     59   1.1  riastrad #include "vi.h"
     60   1.7  riastrad #include "soc15.h"
     61   1.7  riastrad #include "nv.h"
     62   1.1  riastrad #include "bif/bif_4_1_d.h"
     63   1.7  riastrad #include <linux/pci.h>
     64   1.7  riastrad #include <linux/firmware.h>
     65   1.7  riastrad #include "amdgpu_vf_error.h"
     66   1.7  riastrad 
     67   1.7  riastrad #include "amdgpu_amdkfd.h"
     68   1.7  riastrad #include "amdgpu_pm.h"
     69   1.7  riastrad 
     70   1.7  riastrad #include "amdgpu_xgmi.h"
     71   1.7  riastrad #include "amdgpu_ras.h"
     72   1.7  riastrad #include "amdgpu_pmu.h"
     73   1.1  riastrad 
     74   1.7  riastrad #include <linux/suspend.h>
     75   1.7  riastrad #include <drm/task_barrier.h>
     76   1.5  riastrad #include <linux/nbsd-namespace.h>
     77   1.5  riastrad 
     78   1.7  riastrad MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
     79   1.7  riastrad MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
     80   1.7  riastrad MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
     81   1.7  riastrad MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
     82   1.7  riastrad MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
     83   1.7  riastrad MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
     84   1.7  riastrad MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
     85   1.7  riastrad MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
     86   1.7  riastrad MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
     87   1.7  riastrad MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
     88   1.7  riastrad 
/* Time, in milliseconds, to wait for the ASIC to come back after resume. */
#define AMDGPU_RESUME_MS		2000

/*
 * Human-readable ASIC names, indexed by enum amd_asic_type (adev->asic_type).
 * The order here must match the enum; "LAST" terminates the table.
 */
const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"NAVI10",
	"NAVI14",
	"NAVI12",
	"LAST",
};
    122   1.1  riastrad 
    123   1.8  riastrad #ifndef __NetBSD__		/* XXX amdgpu sysfs */
    124   1.8  riastrad 
/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs)
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received
 */

/*
 * sysfs show callback for pcie_replay_count.  Looks up the amdgpu device
 * hanging off the drm_device's drvdata and formats the ASIC's replay
 * counter as a decimal line into @buf (a PAGE_SIZE sysfs buffer).
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

/* Read-only sysfs attribute: /sys/.../pcie_replay_count */
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
    146   1.7  riastrad 
    147   1.8  riastrad #endif	/* __NetBSD__ */
    148   1.8  riastrad 
    149   1.7  riastrad static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
    150   1.7  riastrad 
    151   1.7  riastrad /**
    152   1.7  riastrad  * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
    153   1.7  riastrad  *
    154   1.7  riastrad  * @dev: drm_device pointer
    155   1.7  riastrad  *
    156   1.7  riastrad  * Returns true if the device is a dGPU with HG/PX power control,
    157   1.7  riastrad  * otherwise return false.
    158   1.7  riastrad  */
    159   1.7  riastrad bool amdgpu_device_supports_boco(struct drm_device *dev)
    160   1.1  riastrad {
    161   1.1  riastrad 	struct amdgpu_device *adev = dev->dev_private;
    162   1.1  riastrad 
    163   1.1  riastrad 	if (adev->flags & AMD_IS_PX)
    164   1.1  riastrad 		return true;
    165   1.1  riastrad 	return false;
    166   1.1  riastrad }
    167   1.1  riastrad 
/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO (Bus Active, Chip Off),
 * otherwise returns false.  The answer is delegated to the ASIC-specific
 * callback.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	return amdgpu_asic_supports_baco(adev);
}
    182   1.7  riastrad 
/**
 * VRAM access helper functions.
 *
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, sizeof(@buf) must > @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * Copies @size bytes one dword at a time through the MM_INDEX/MM_DATA
 * indirection window.  NOTE(review): assumes @size is a nonzero multiple
 * of 4 — @size == 0 would make @last wrap around and the loop run (nearly)
 * forever; confirm callers never pass 0.
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write)
{
	uint64_t last;
	unsigned long flags;

	last = size - 4;
	for (last += pos; pos <= last; pos += 4) {
		/* One dword per trip, serialized against other users of the
		 * indirect MMIO window by mmio_idx_lock. */
		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		/* Bit 31 of MM_INDEX plus MM_INDEX_HI extend the address;
		 * presumably this selects the full VRAM aperture — confirm
		 * against the register spec before changing. */
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *buf++);
		else
			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}
    212   1.7  riastrad 
    213   1.1  riastrad /*
    214   1.1  riastrad  * MMIO register access helper functions.
    215   1.1  riastrad  */
    216   1.7  riastrad /**
    217   1.7  riastrad  * amdgpu_mm_rreg - read a memory mapped IO register
    218   1.7  riastrad  *
    219   1.7  riastrad  * @adev: amdgpu_device pointer
    220   1.7  riastrad  * @reg: dword aligned register offset
    221   1.7  riastrad  * @acc_flags: access flags which require special behavior
    222   1.7  riastrad  *
    223   1.7  riastrad  * Returns the 32 bit value from the offset specified.
    224   1.7  riastrad  */
    225   1.1  riastrad uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
    226   1.7  riastrad 			uint32_t acc_flags)
    227   1.1  riastrad {
    228   1.7  riastrad 	uint32_t ret;
    229   1.7  riastrad 
    230   1.7  riastrad 	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
    231   1.7  riastrad 		return amdgpu_kiq_rreg(adev, reg);
    232   1.7  riastrad 
    233   1.7  riastrad 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
    234   1.3  riastrad #ifdef __NetBSD__
    235   1.3  riastrad 		return bus_space_read_4(adev->rmmiot, adev->rmmioh, 4*reg);
    236   1.3  riastrad #else
    237   1.7  riastrad 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
    238   1.3  riastrad #endif
    239   1.1  riastrad 	else {
    240   1.1  riastrad 		unsigned long flags;
    241   1.1  riastrad 
    242   1.1  riastrad 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
    243   1.3  riastrad #ifdef __NetBSD__
    244   1.3  riastrad 		bus_space_write_4(adev->rmmiot, adev->rmmioh, 4*mmMM_INDEX,
    245   1.3  riastrad 		    4*reg);
    246   1.3  riastrad 		ret = bus_space_read_4(adev->rmmiot, adev->rmmioh,
    247   1.4  riastrad 		    4*mmMM_DATA);
    248   1.3  riastrad #else
    249   1.1  riastrad 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
    250   1.1  riastrad 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
    251   1.3  riastrad #endif
    252   1.1  riastrad 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
    253   1.7  riastrad 	}
    254   1.7  riastrad 	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
    255   1.7  riastrad 	return ret;
    256   1.7  riastrad }
    257   1.1  riastrad 
    258   1.7  riastrad /*
    259   1.7  riastrad  * MMIO register read with bytes helper functions
    260   1.7  riastrad  * @offset:bytes offset from MMIO start
    261   1.7  riastrad  *
    262   1.7  riastrad */
    263   1.7  riastrad 
    264   1.7  riastrad /**
    265   1.7  riastrad  * amdgpu_mm_rreg8 - read a memory mapped IO register
    266   1.7  riastrad  *
    267   1.7  riastrad  * @adev: amdgpu_device pointer
    268   1.7  riastrad  * @offset: byte aligned register offset
    269   1.7  riastrad  *
    270   1.7  riastrad  * Returns the 8 bit value from the offset specified.
    271   1.7  riastrad  */
    272   1.7  riastrad uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
    273   1.7  riastrad 	if (offset < adev->rmmio_size)
    274   1.8  riastrad #ifdef __NetBSD__
    275  1.15  riastrad 		return bus_space_read_1(adev->rmmiot, adev->rmmioh, offset);
    276   1.8  riastrad #else
    277   1.7  riastrad 		return (readb(adev->rmmio + offset));
    278   1.8  riastrad #endif
    279   1.7  riastrad 	BUG();
    280   1.7  riastrad }
    281   1.7  riastrad 
    282   1.7  riastrad /*
    283   1.7  riastrad  * MMIO register write with bytes helper functions
    284   1.7  riastrad  * @offset:bytes offset from MMIO start
    285   1.7  riastrad  * @value: the value want to be written to the register
    286   1.7  riastrad  *
    287   1.7  riastrad */
    288   1.7  riastrad /**
    289   1.7  riastrad  * amdgpu_mm_wreg8 - read a memory mapped IO register
    290   1.7  riastrad  *
    291   1.7  riastrad  * @adev: amdgpu_device pointer
    292   1.7  riastrad  * @offset: byte aligned register offset
    293   1.7  riastrad  * @value: 8 bit value to write
    294   1.7  riastrad  *
    295   1.7  riastrad  * Writes the value specified to the offset specified.
    296   1.7  riastrad  */
    297   1.7  riastrad void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
    298   1.7  riastrad 	if (offset < adev->rmmio_size)
    299   1.8  riastrad #ifdef __NetBSD__
    300  1.15  riastrad 		bus_space_write_1(adev->rmmiot, adev->rmmioh, offset, value);
    301   1.8  riastrad #else
    302   1.7  riastrad 		writeb(value, adev->rmmio + offset);
    303   1.8  riastrad #endif
    304   1.7  riastrad 	else
    305   1.7  riastrad 		BUG();
    306   1.1  riastrad }
    307   1.1  riastrad 
/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	/* Remember the last value written to register 0 on VEGA10+; it is
	 * consulted below.  NOTE(review): the magic value 0x5702C and the
	 * 500us delay after a write to register 1 look like an undocumented
	 * hardware workaround inherited from Linux — confirm before touching
	 * this sequence or its ordering. */
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	/* Under SR-IOV at runtime (or when explicitly requested), writes
	 * must be routed through the KIQ rather than direct MMIO. */
	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
		return amdgpu_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
#ifdef __NetBSD__
		bus_space_write_4(adev->rmmiot, adev->rmmioh, 4*reg, v);
#else
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
#endif
	else {
		/* Out-of-range or indexed registers go through the
		 * MM_INDEX/MM_DATA indirection window under the lock. */
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
#ifdef __NetBSD__
		bus_space_write_4(adev->rmmiot, adev->rmmioh, 4*mmMM_INDEX,
		    reg*4);
		bus_space_write_4(adev->rmmiot, adev->rmmioh, 4*mmMM_DATA, v);
#else
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
#endif
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
    355   1.1  riastrad 
/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.  Registers that do
 * not fit in the IO aperture are reached through the MM_INDEX/MM_DATA
 * indirection window.  Note no lock is taken here, unlike the MMIO
 * indirect path — NOTE(review): presumably the IO window has no
 * concurrent users; confirm.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
#ifdef __NetBSD__
		return bus_space_read_4(adev->rio_memt, adev->rio_memh, 4*reg);
#else
		return ioread32(adev->rio_mem + (reg * 4));
#endif
	else {
#ifdef __NetBSD__
		bus_space_write_4(adev->rio_memt, adev->rio_memh, 4*mmMM_INDEX,
		    4*reg);
		return bus_space_read_4(adev->rio_memt, adev->rio_memh,
		    4*mmMM_DATA);
#else
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
#endif
	}
}
    384   1.1  riastrad 
/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.  Registers that do
 * not fit in the IO aperture are reached through the MM_INDEX/MM_DATA
 * indirection window (unlocked, like amdgpu_io_rreg).
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	/* Same VEGA10+ last_mm_index/0x5702C bookkeeping as amdgpu_mm_wreg;
	 * NOTE(review): looks like an undocumented hardware workaround —
	 * confirm before touching. */
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
#ifdef __NetBSD__
		bus_space_write_4(adev->rio_memt, adev->rio_memh, 4*reg, v);
#else
		iowrite32(v, adev->rio_mem + (reg * 4));
#endif
	else {
#ifdef __NetBSD__
		bus_space_write_4(adev->rio_memt, adev->rio_memh, 4*mmMM_INDEX,
		    4*reg);
		bus_space_write_4(adev->rio_memt, adev->rio_memh, 4*mmMM_DATA,
		    v);
#else
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
#endif
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
    422   1.1  riastrad 
    423   1.1  riastrad /**
    424   1.1  riastrad  * amdgpu_mm_rdoorbell - read a doorbell dword
    425   1.1  riastrad  *
    426   1.1  riastrad  * @adev: amdgpu_device pointer
    427   1.1  riastrad  * @index: doorbell index
    428   1.1  riastrad  *
    429   1.1  riastrad  * Returns the value in the doorbell aperture at the
    430   1.1  riastrad  * requested doorbell index (CIK).
    431   1.1  riastrad  */
    432   1.1  riastrad u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
    433   1.1  riastrad {
    434   1.1  riastrad 	if (index < adev->doorbell.num_doorbells) {
    435   1.3  riastrad #ifdef __NetBSD__
    436   1.3  riastrad 		return bus_space_read_4(adev->doorbell.bst, adev->doorbell.bsh,
    437   1.3  riastrad 		    4*index);
    438   1.3  riastrad #else
    439   1.1  riastrad 		return readl(adev->doorbell.ptr + index);
    440   1.3  riastrad #endif
    441   1.1  riastrad 	} else {
    442   1.1  riastrad 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
    443   1.1  riastrad 		return 0;
    444   1.1  riastrad 	}
    445   1.1  riastrad }
    446   1.1  riastrad 
    447   1.1  riastrad /**
    448   1.1  riastrad  * amdgpu_mm_wdoorbell - write a doorbell dword
    449   1.1  riastrad  *
    450   1.1  riastrad  * @adev: amdgpu_device pointer
    451   1.1  riastrad  * @index: doorbell index
    452   1.1  riastrad  * @v: value to write
    453   1.1  riastrad  *
    454   1.1  riastrad  * Writes @v to the doorbell aperture at the
    455   1.1  riastrad  * requested doorbell index (CIK).
    456   1.1  riastrad  */
    457   1.1  riastrad void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
    458   1.1  riastrad {
    459   1.1  riastrad 	if (index < adev->doorbell.num_doorbells) {
    460   1.3  riastrad #ifdef __NetBSD__
    461   1.3  riastrad 		bus_space_write_4(adev->doorbell.bst, adev->doorbell.bsh,
    462   1.3  riastrad 		    4*index, v);
    463   1.3  riastrad #else
    464   1.1  riastrad 		writel(v, adev->doorbell.ptr + index);
    465   1.3  riastrad #endif
    466   1.1  riastrad 	} else {
    467   1.1  riastrad 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
    468   1.1  riastrad 	}
    469   1.1  riastrad }
    470   1.1  riastrad 
/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 *
 * On NetBSD without _LP64 the 64-bit read is assembled from two 32-bit
 * bus_space reads and is therefore not atomic.  NOTE(review): the
 * big-endian branch treats the lower byte offset as the high half —
 * presumably matching how bus_space swizzles device-endian data on BE
 * hosts; confirm against bus_space(9) semantics for this bus.
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
#ifdef __NetBSD__
#ifdef _LP64
		return bus_space_read_8(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index);
#else
		uint64_t lo, hi;
#if _BYTE_ORDER == _LITTLE_ENDIAN
		lo = bus_space_read_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index);
		hi = bus_space_read_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index + 4);
#else
		hi = bus_space_read_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index);
		lo = bus_space_read_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index + 4);
#endif
		return lo | (hi << 32);
#endif
#else
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
#endif
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
    510   1.7  riastrad 
/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 *
 * On NetBSD without _LP64 the 64-bit write is split into two 32-bit
 * bus_space writes; the half ordering mirrors amdgpu_mm_rdoorbell64.
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
#ifdef __NetBSD__
#ifdef _LP64
		bus_space_write_8(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index, v);
#else
		/*
		 * XXX This might not be as atomic as one might hope...
		 */
#if _BYTE_ORDER == _LITTLE_ENDIAN
		bus_space_write_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index, v & 0xffffffffU);
		bus_space_write_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index + 4, v >> 32);
#else
		bus_space_write_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index, v >> 32);
		bus_space_write_4(adev->doorbell.bst, adev->doorbell.bsh,
		    4*index + 4, v & 0xffffffffU);
#endif
#endif
#else
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
#endif
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
    551   1.7  riastrad 
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).  Any call is a
 * driver bug, so it logs and panics; the return value is only
 * there to satisfy the callback signature.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}
    568   1.1  riastrad 
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).  Any call is a
 * driver bug, so it logs and panics.
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}
    585   1.1  riastrad 
/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).  Any call is a
 * driver bug, so it logs and panics; the return value is only
 * there to satisfy the callback signature.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}
    602   1.7  riastrad 
/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).  Any call is a
 * driver bug, so it logs and panics.
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08"PRIX64"\n",
		  reg, v);
	BUG();
}
    619   1.7  riastrad 
/**
 * amdgpu_block_invalid_rreg - dummy block reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).  Any call is a
 * driver bug, so it logs and panics; the return value is only
 * there to satisfy the callback signature.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}
    639   1.1  riastrad 
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();	/* reaching this stub means the asic callback table is miswired */
}
    659   1.1  riastrad 
/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 *
 * Returns 0 on success, or a negative error code from
 * amdgpu_bo_create_kernel() on failure.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	/* __UNVOLATILE strips the volatile qualifier from the CPU mapping
	 * pointer so it can be passed through the plain void ** out-param */
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)__UNVOLATILE(&adev->vram_scratch.ptr));
}
    676   1.1  riastrad 
/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	/* NULL out-params: callers don't need gpu_addr/cpu_addr back */
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
    688   1.1  riastrad 
    689   1.1  riastrad /**
    690   1.7  riastrad  * amdgpu_device_program_register_sequence - program an array of registers.
    691   1.1  riastrad  *
    692   1.1  riastrad  * @adev: amdgpu_device pointer
    693   1.1  riastrad  * @registers: pointer to the register array
    694   1.1  riastrad  * @array_size: size of the register array
    695   1.1  riastrad  *
    696   1.1  riastrad  * Programs an array or registers with and and or masks.
    697   1.1  riastrad  * This is a helper for setting golden registers.
    698   1.1  riastrad  */
    699   1.7  riastrad void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
    700   1.7  riastrad 					     const u32 *registers,
    701   1.7  riastrad 					     const u32 array_size)
    702   1.1  riastrad {
    703   1.1  riastrad 	u32 tmp, reg, and_mask, or_mask;
    704   1.1  riastrad 	int i;
    705   1.1  riastrad 
    706   1.1  riastrad 	if (array_size % 3)
    707   1.1  riastrad 		return;
    708   1.1  riastrad 
    709   1.1  riastrad 	for (i = 0; i < array_size; i +=3) {
    710   1.1  riastrad 		reg = registers[i + 0];
    711   1.1  riastrad 		and_mask = registers[i + 1];
    712   1.1  riastrad 		or_mask = registers[i + 2];
    713   1.1  riastrad 
    714   1.1  riastrad 		if (and_mask == 0xffffffff) {
    715   1.1  riastrad 			tmp = or_mask;
    716   1.1  riastrad 		} else {
    717   1.1  riastrad 			tmp = RREG32(reg);
    718   1.1  riastrad 			tmp &= ~and_mask;
    719   1.7  riastrad 			if (adev->family >= AMDGPU_FAMILY_AI)
    720   1.7  riastrad 				tmp |= (or_mask & and_mask);
    721   1.7  riastrad 			else
    722   1.7  riastrad 				tmp |= or_mask;
    723   1.1  riastrad 		}
    724   1.1  riastrad 		WREG32(reg, tmp);
    725   1.1  riastrad 	}
    726   1.1  riastrad }
    727   1.1  riastrad 
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	/* writing the magic value to PCI config offset 0x7c triggers the reset */
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
    740   1.1  riastrad 
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
#ifndef __NetBSD__
		adev->doorbell.ptr = NULL;
#endif
		return 0;
	}

	/* doorbells live in PCI BAR 2; bail out if it was never assigned */
	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	/* clamp to what the BAR can actually hold */
	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
	 * paging queue doorbell use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with paging queue enabled,
	 * the max num_doorbells should + 1 page (0x400 in dword)
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

#ifdef __NetBSD__
	/* map the doorbell range with bus_space instead of ioremap;
	 * amdgpu_device_doorbell_fini() uses num_doorbells != 0 to know
	 * whether this mapping exists */
	int r;
	adev->doorbell.bst = adev->pdev->pd_pa.pa_memt;
	/* XXX errno NetBSD->Linux */
	r = -bus_space_map(adev->doorbell.bst, adev->doorbell.base,
	    adev->doorbell.num_doorbells * sizeof(u32), 0,
	    &adev->doorbell.bsh);
	if (r)
		return r;
#else
	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;
#endif

	return 0;
}
    808   1.1  riastrad 
/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
#ifdef __NetBSD__
	/* num_doorbells != 0 means amdgpu_device_doorbell_init() mapped the
	 * range; unmap it exactly once and mark it gone */
	if (adev->doorbell.num_doorbells) {
		bus_space_unmap(adev->doorbell.bst, adev->doorbell.bsh,
		    adev->doorbell.num_doorbells * sizeof(u32));
		adev->doorbell.num_doorbells = 0;
	}
#else
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
#endif
}
    829   1.1  riastrad 
    830   1.7  riastrad 
    831   1.1  riastrad 
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
    837   1.1  riastrad 
/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		/* __UNVOLATILE: drop volatile so the CPU pointer can go
		 * through the plain void ** out-param */
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)__UNVOLATILE(&adev->wb.wb));
		adev->wb.wb_obj = NULL;
	}
}
    855   1.1  riastrad 
/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	/* idempotent: skip if the writeback BO already exists */
	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)__UNVOLATILE(&adev->wb.wb));
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		/* all slots start out free */
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset(__UNVOLATILE(adev->wb.wb), 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}
    889   1.1  riastrad 
    890   1.1  riastrad /**
    891   1.7  riastrad  * amdgpu_device_wb_get - Allocate a wb entry
    892   1.1  riastrad  *
    893   1.1  riastrad  * @adev: amdgpu_device pointer
    894   1.1  riastrad  * @wb: wb index
    895   1.1  riastrad  *
    896   1.1  riastrad  * Allocate a wb slot for use by the driver (all asics).
    897   1.1  riastrad  * Returns 0 on success or -EINVAL on failure.
    898   1.1  riastrad  */
    899   1.7  riastrad int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
    900   1.1  riastrad {
    901   1.1  riastrad 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
    902   1.7  riastrad 
    903   1.1  riastrad 	if (offset < adev->wb.num_wb) {
    904   1.1  riastrad 		__set_bit(offset, adev->wb.used);
    905   1.7  riastrad 		*wb = offset << 3; /* convert to dw offset */
    906   1.1  riastrad 		return 0;
    907   1.1  riastrad 	} else {
    908   1.1  riastrad 		return -EINVAL;
    909   1.1  riastrad 	}
    910   1.1  riastrad }
    911   1.1  riastrad 
    912   1.1  riastrad /**
    913   1.7  riastrad  * amdgpu_device_wb_free - Free a wb entry
    914   1.1  riastrad  *
    915   1.1  riastrad  * @adev: amdgpu_device pointer
    916   1.1  riastrad  * @wb: wb index
    917   1.1  riastrad  *
    918   1.1  riastrad  * Free a wb slot allocated for use by the driver (all asics)
    919   1.1  riastrad  */
    920   1.7  riastrad void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
    921   1.1  riastrad {
    922   1.7  riastrad 	wb >>= 3;
    923   1.1  riastrad 	if (wb < adev->wb.num_wb)
    924   1.1  riastrad 		__clear_bit(wb, adev->wb.used);
    925   1.1  riastrad }
    926   1.1  riastrad 
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	/* NOTE(review): rbar_size is the encoded size value expected by
	 * pci_resize_resource() — confirm against the PCI rebar helpers */
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

#ifdef __NetBSD__		/* XXX amdgpu fb resize */
	/* BAR resizing is not implemented on NetBSD; __USE silences
	 * set-but-unused warnings for the locals above */
	__USE(space_needed);
	__USE(rbar_size);
	__USE(root);
	__USE(res);
	__USE(i);
	__USE(cmd);
	__USE(r);
#else

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	/* re-enable memory decoding with the saved command word */
	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

#endif

	return 0;
}
   1008   1.1  riastrad 
/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	/* SR-IOV VFs never need the driver to post */
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			/* SMC version is read from dword 69 of the firmware image */
			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		/* one-shot flag: consume it and force a post */
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	/* a sane, non-all-ones memsize means the asic was already posted */
	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
   1065   1.1  riastrad 
   1066   1.7  riastrad #ifndef __NetBSD__		/* XXX amdgpu vga */
   1067   1.7  riastrad /* if we get transitioned to only one device, take VGA back */
   1068   1.1  riastrad /**
   1069   1.7  riastrad  * amdgpu_device_vga_set_decode - enable/disable vga decode
   1070   1.1  riastrad  *
   1071   1.7  riastrad  * @cookie: amdgpu_device pointer
   1072   1.7  riastrad  * @state: enable/disable vga decode
   1073   1.1  riastrad  *
   1074   1.7  riastrad  * Enable/disable vga decode (all asics).
   1075   1.7  riastrad  * Returns VGA resource flags.
   1076   1.1  riastrad  */
   1077   1.7  riastrad static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
   1078   1.1  riastrad {
   1079   1.7  riastrad 	struct amdgpu_device *adev = cookie;
   1080   1.7  riastrad 	amdgpu_asic_set_vga_state(adev, state);
   1081   1.7  riastrad 	if (state)
   1082   1.7  riastrad 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
   1083   1.7  riastrad 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1084   1.7  riastrad 	else
   1085   1.7  riastrad 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1086   1.1  riastrad }
   1087   1.7  riastrad #endif	/* __NetBSD__ */
   1088   1.1  riastrad 
   1089   1.1  riastrad /**
   1090   1.7  riastrad  * amdgpu_device_check_block_size - validate the vm block size
   1091   1.1  riastrad  *
   1092   1.1  riastrad  * @adev: amdgpu_device pointer
   1093   1.1  riastrad  *
   1094   1.7  riastrad  * Validates the vm block size specified via module parameter.
   1095   1.7  riastrad  * The vm block size defines number of bits in page table versus page directory,
   1096   1.7  riastrad  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
   1097   1.7  riastrad  * page table and the remaining bits are in the page directory.
   1098   1.1  riastrad  */
   1099   1.7  riastrad static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
   1100   1.1  riastrad {
   1101   1.7  riastrad 	/* defines number of bits in page table versus page directory,
   1102   1.7  riastrad 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
   1103   1.7  riastrad 	 * page table and the remaining bits are in the page directory */
   1104   1.7  riastrad 	if (amdgpu_vm_block_size == -1)
   1105   1.7  riastrad 		return;
   1106   1.3  riastrad 
   1107   1.7  riastrad 	if (amdgpu_vm_block_size < 9) {
   1108   1.7  riastrad 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
   1109   1.7  riastrad 			 amdgpu_vm_block_size);
   1110   1.7  riastrad 		amdgpu_vm_block_size = -1;
   1111   1.1  riastrad 	}
   1112   1.1  riastrad }
   1113   1.1  riastrad 
   1114   1.1  riastrad /**
   1115   1.7  riastrad  * amdgpu_device_check_vm_size - validate the vm size
   1116   1.1  riastrad  *
   1117   1.1  riastrad  * @adev: amdgpu_device pointer
   1118   1.1  riastrad  *
   1119   1.7  riastrad  * Validates the vm size in GB specified via module parameter.
   1120   1.7  riastrad  * The VM size is the size of the GPU virtual memory space in GB.
   1121   1.1  riastrad  */
   1122   1.7  riastrad static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
   1123   1.1  riastrad {
   1124   1.7  riastrad 	/* no need to check the default value */
   1125   1.7  riastrad 	if (amdgpu_vm_size == -1)
   1126   1.1  riastrad 		return;
   1127   1.1  riastrad 
   1128   1.7  riastrad 	if (amdgpu_vm_size < 1) {
   1129   1.7  riastrad 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
   1130   1.7  riastrad 			 amdgpu_vm_size);
   1131   1.7  riastrad 		amdgpu_vm_size = -1;
   1132   1.3  riastrad 	}
   1133   1.1  riastrad }
   1134   1.1  riastrad 
   1135   1.7  riastrad static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
   1136   1.1  riastrad {
   1137   1.7  riastrad 	struct sysinfo si;
   1138   1.7  riastrad 	bool is_os_64 = (sizeof(void *) == 8);
   1139   1.7  riastrad 	uint64_t total_memory;
   1140   1.7  riastrad 	uint64_t dram_size_seven_GB = 0x1B8000000;
   1141   1.7  riastrad 	uint64_t dram_size_three_GB = 0xB8000000;
   1142   1.1  riastrad 
   1143   1.7  riastrad 	if (amdgpu_smu_memory_pool_size == 0)
   1144   1.7  riastrad 		return;
   1145   1.1  riastrad 
   1146   1.7  riastrad 	if (!is_os_64) {
   1147   1.7  riastrad 		DRM_WARN("Not 64-bit OS, feature not supported\n");
   1148   1.7  riastrad 		goto def_value;
   1149   1.7  riastrad 	}
   1150   1.7  riastrad 	si_meminfo(&si);
   1151   1.7  riastrad 	total_memory = (uint64_t)si.totalram * si.mem_unit;
   1152   1.7  riastrad 
   1153   1.7  riastrad 	if ((amdgpu_smu_memory_pool_size == 1) ||
   1154   1.7  riastrad 		(amdgpu_smu_memory_pool_size == 2)) {
   1155   1.7  riastrad 		if (total_memory < dram_size_three_GB)
   1156   1.7  riastrad 			goto def_value1;
   1157   1.7  riastrad 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
   1158   1.7  riastrad 		(amdgpu_smu_memory_pool_size == 8)) {
   1159   1.7  riastrad 		if (total_memory < dram_size_seven_GB)
   1160   1.7  riastrad 			goto def_value1;
   1161   1.1  riastrad 	} else {
   1162   1.7  riastrad 		DRM_WARN("Smu memory pool size not supported\n");
   1163   1.7  riastrad 		goto def_value;
   1164   1.1  riastrad 	}
   1165   1.7  riastrad 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
   1166   1.1  riastrad 
   1167   1.7  riastrad 	return;
   1168   1.1  riastrad 
   1169   1.7  riastrad def_value1:
   1170   1.7  riastrad 	DRM_WARN("No enough system memory\n");
   1171   1.7  riastrad def_value:
   1172   1.7  riastrad 	adev->pm.smu_prv_buffer_size = 0;
   1173   1.1  riastrad }
   1174   1.1  riastrad 
   1175   1.1  riastrad /**
   1176   1.7  riastrad  * amdgpu_device_check_arguments - validate module params
   1177   1.1  riastrad  *
   1178   1.1  riastrad  * @adev: amdgpu_device pointer
   1179   1.1  riastrad  *
   1180   1.1  riastrad  * Validates certain module parameters and updates
   1181   1.1  riastrad  * the associated values used by the driver (all asics).
   1182   1.1  riastrad  */
   1183   1.7  riastrad static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
   1184   1.1  riastrad {
   1185   1.7  riastrad 	if (amdgpu_sched_jobs < 4) {
   1186   1.7  riastrad 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
   1187   1.7  riastrad 			 amdgpu_sched_jobs);
   1188   1.7  riastrad 		amdgpu_sched_jobs = 4;
   1189   1.7  riastrad 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
   1190   1.7  riastrad 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
   1191   1.7  riastrad 			 amdgpu_sched_jobs);
   1192   1.7  riastrad 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
   1193   1.1  riastrad 	}
   1194   1.1  riastrad 
   1195   1.7  riastrad 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
   1196   1.7  riastrad 		/* gart size must be greater or equal to 32M */
   1197   1.7  riastrad 		dev_warn(adev->dev, "gart size (%d) too small\n",
   1198   1.7  riastrad 			 amdgpu_gart_size);
   1199   1.7  riastrad 		amdgpu_gart_size = -1;
   1200   1.1  riastrad 	}
   1201   1.1  riastrad 
   1202   1.7  riastrad 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
   1203   1.7  riastrad 		/* gtt size must be greater or equal to 32M */
   1204   1.7  riastrad 		dev_warn(adev->dev, "gtt size (%d) too small\n",
   1205   1.7  riastrad 				 amdgpu_gtt_size);
   1206   1.7  riastrad 		amdgpu_gtt_size = -1;
   1207   1.1  riastrad 	}
   1208   1.1  riastrad 
   1209   1.7  riastrad 	/* valid range is between 4 and 9 inclusive */
   1210   1.7  riastrad 	if (amdgpu_vm_fragment_size != -1 &&
   1211   1.7  riastrad 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
   1212   1.7  riastrad 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
   1213   1.7  riastrad 		amdgpu_vm_fragment_size = -1;
   1214   1.1  riastrad 	}
   1215   1.1  riastrad 
   1216   1.7  riastrad 	amdgpu_device_check_smu_prv_buffer_size(adev);
   1217   1.1  riastrad 
   1218   1.7  riastrad 	amdgpu_device_check_vm_size(adev);
   1219   1.1  riastrad 
   1220   1.7  riastrad 	amdgpu_device_check_block_size(adev);
   1221   1.1  riastrad 
   1222   1.7  riastrad 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
   1223   1.1  riastrad 
   1224   1.7  riastrad 	return 0;
   1225   1.1  riastrad }
   1226   1.1  riastrad 
   1227   1.3  riastrad #ifndef __NetBSD__		/* XXX amdgpu vga */
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	/* BOCO-capable devices are not powered off through switcheroo */
	if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

#ifndef __NetBSD__		/* pmf handles this for us.  */
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
#endif
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
#ifndef __NetBSD__		/* pmf handles this for us.  */
		pci_save_state(dev->pdev);
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
#endif
	}
}
   1275   1.1  riastrad 
   1276   1.1  riastrad /**
   1277   1.1  riastrad  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
   1278   1.1  riastrad  *
   1279   1.1  riastrad  * @pdev: pci dev pointer
   1280   1.1  riastrad  *
   1281   1.1  riastrad  * Callback for the switcheroo driver.  Check of the switcheroo
   1282   1.1  riastrad  * state can be changed.
   1283   1.1  riastrad  * Returns true if the state can be changed, false if not.
   1284   1.1  riastrad  */
   1285   1.1  riastrad static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
   1286   1.1  riastrad {
   1287   1.1  riastrad 	struct drm_device *dev = pci_get_drvdata(pdev);
   1288   1.1  riastrad 
   1289   1.1  riastrad 	/*
   1290   1.1  riastrad 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
   1291   1.1  riastrad 	* locking inversion with the driver load path. And the access here is
   1292   1.1  riastrad 	* completely racy anyway. So don't bother with locking for now.
   1293   1.1  riastrad 	*/
   1294   1.1  riastrad 	return dev->open_count == 0;
   1295   1.1  riastrad }
   1296   1.1  riastrad 
/* vga_switcheroo client callbacks for amdgpu; no reprobe hook is needed. */
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
   1302   1.3  riastrad #endif	/* __NetBSD__ */
   1303   1.1  riastrad 
   1304   1.7  riastrad /**
   1305   1.7  riastrad  * amdgpu_device_ip_set_clockgating_state - set the CG state
   1306   1.7  riastrad  *
   1307   1.7  riastrad  * @dev: amdgpu_device pointer
   1308   1.7  riastrad  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
   1309   1.7  riastrad  * @state: clockgating state (gate or ungate)
   1310   1.7  riastrad  *
   1311   1.7  riastrad  * Sets the requested clockgating state for all instances of
   1312   1.7  riastrad  * the hardware IP specified.
   1313   1.7  riastrad  * Returns the error code from the last instance.
   1314   1.7  riastrad  */
   1315   1.7  riastrad int amdgpu_device_ip_set_clockgating_state(void *dev,
   1316   1.7  riastrad 					   enum amd_ip_block_type block_type,
   1317   1.7  riastrad 					   enum amd_clockgating_state state)
   1318   1.1  riastrad {
   1319   1.7  riastrad 	struct amdgpu_device *adev = dev;
   1320   1.1  riastrad 	int i, r = 0;
   1321   1.1  riastrad 
   1322   1.1  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1323   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   1324   1.7  riastrad 			continue;
   1325   1.7  riastrad 		if (adev->ip_blocks[i].version->type != block_type)
   1326   1.7  riastrad 			continue;
   1327   1.7  riastrad 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
   1328   1.7  riastrad 			continue;
   1329   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
   1330   1.7  riastrad 			(void *)adev, state);
   1331   1.7  riastrad 		if (r)
   1332   1.7  riastrad 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
   1333   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   1334   1.1  riastrad 	}
   1335   1.1  riastrad 	return r;
   1336   1.1  riastrad }
   1337   1.1  riastrad 
   1338   1.7  riastrad /**
   1339   1.7  riastrad  * amdgpu_device_ip_set_powergating_state - set the PG state
   1340   1.7  riastrad  *
   1341   1.7  riastrad  * @dev: amdgpu_device pointer
   1342   1.7  riastrad  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
   1343   1.7  riastrad  * @state: powergating state (gate or ungate)
   1344   1.7  riastrad  *
   1345   1.7  riastrad  * Sets the requested powergating state for all instances of
   1346   1.7  riastrad  * the hardware IP specified.
   1347   1.7  riastrad  * Returns the error code from the last instance.
   1348   1.7  riastrad  */
   1349   1.7  riastrad int amdgpu_device_ip_set_powergating_state(void *dev,
   1350   1.7  riastrad 					   enum amd_ip_block_type block_type,
   1351   1.7  riastrad 					   enum amd_powergating_state state)
   1352   1.1  riastrad {
   1353   1.7  riastrad 	struct amdgpu_device *adev = dev;
   1354   1.1  riastrad 	int i, r = 0;
   1355   1.1  riastrad 
   1356   1.1  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1357   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   1358   1.7  riastrad 			continue;
   1359   1.7  riastrad 		if (adev->ip_blocks[i].version->type != block_type)
   1360   1.7  riastrad 			continue;
   1361   1.7  riastrad 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
   1362   1.7  riastrad 			continue;
   1363   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
   1364   1.7  riastrad 			(void *)adev, state);
   1365   1.7  riastrad 		if (r)
   1366   1.7  riastrad 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
   1367   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   1368   1.1  riastrad 	}
   1369   1.1  riastrad 	return r;
   1370   1.1  riastrad }
   1371   1.1  riastrad 
   1372   1.7  riastrad /**
   1373   1.7  riastrad  * amdgpu_device_ip_get_clockgating_state - get the CG state
   1374   1.7  riastrad  *
   1375   1.7  riastrad  * @adev: amdgpu_device pointer
   1376   1.7  riastrad  * @flags: clockgating feature flags
   1377   1.7  riastrad  *
   1378   1.7  riastrad  * Walks the list of IPs on the device and updates the clockgating
   1379   1.7  riastrad  * flags for each IP.
   1380   1.7  riastrad  * Updates @flags with the feature flags for each hardware IP where
   1381   1.7  riastrad  * clockgating is enabled.
   1382   1.7  riastrad  */
   1383   1.7  riastrad void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
   1384   1.7  riastrad 					    u32 *flags)
   1385   1.1  riastrad {
   1386   1.1  riastrad 	int i;
   1387   1.1  riastrad 
   1388   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1389   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   1390   1.7  riastrad 			continue;
   1391   1.7  riastrad 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
   1392   1.7  riastrad 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
   1393   1.7  riastrad 	}
   1394   1.1  riastrad }
   1395   1.1  riastrad 
   1396   1.1  riastrad /**
   1397   1.7  riastrad  * amdgpu_device_ip_wait_for_idle - wait for idle
   1398   1.1  riastrad  *
   1399   1.1  riastrad  * @adev: amdgpu_device pointer
   1400   1.7  riastrad  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
   1401   1.1  riastrad  *
   1402   1.7  riastrad  * Waits for the request hardware IP to be idle.
   1403   1.7  riastrad  * Returns 0 for success or a negative error code on failure.
   1404   1.1  riastrad  */
   1405   1.7  riastrad int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
   1406   1.7  riastrad 				   enum amd_ip_block_type block_type)
   1407   1.7  riastrad {
   1408   1.7  riastrad 	int i, r;
   1409   1.7  riastrad 
   1410   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1411   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   1412   1.7  riastrad 			continue;
   1413   1.7  riastrad 		if (adev->ip_blocks[i].version->type == block_type) {
   1414   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
   1415   1.7  riastrad 			if (r)
   1416   1.7  riastrad 				return r;
   1417   1.7  riastrad 			break;
   1418   1.7  riastrad 		}
   1419   1.7  riastrad 	}
   1420   1.7  riastrad 	return 0;
   1421   1.1  riastrad 
   1422   1.1  riastrad }
   1423   1.1  riastrad 
   1424   1.7  riastrad /**
   1425   1.7  riastrad  * amdgpu_device_ip_is_idle - is the hardware IP idle
   1426   1.7  riastrad  *
   1427   1.7  riastrad  * @adev: amdgpu_device pointer
   1428   1.7  riastrad  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
   1429   1.7  riastrad  *
   1430   1.7  riastrad  * Check if the hardware IP is idle or not.
   1431   1.7  riastrad  * Returns true if it the IP is idle, false if not.
   1432   1.7  riastrad  */
   1433   1.7  riastrad bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
   1434   1.7  riastrad 			      enum amd_ip_block_type block_type)
   1435   1.7  riastrad {
   1436   1.7  riastrad 	int i;
   1437   1.7  riastrad 
   1438   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1439   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   1440   1.7  riastrad 			continue;
   1441   1.7  riastrad 		if (adev->ip_blocks[i].version->type == block_type)
   1442   1.7  riastrad 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
   1443   1.7  riastrad 	}
   1444   1.7  riastrad 	return true;
   1445   1.7  riastrad 
   1446   1.7  riastrad }
   1447   1.7  riastrad 
   1448   1.7  riastrad /**
   1449   1.7  riastrad  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
   1450   1.7  riastrad  *
   1451   1.7  riastrad  * @adev: amdgpu_device pointer
   1452   1.7  riastrad  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
   1453   1.7  riastrad  *
   1454   1.7  riastrad  * Returns a pointer to the hardware IP block structure
   1455   1.7  riastrad  * if it exists for the asic, otherwise NULL.
   1456   1.7  riastrad  */
   1457   1.7  riastrad struct amdgpu_ip_block *
   1458   1.7  riastrad amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
   1459   1.7  riastrad 			      enum amd_ip_block_type type)
   1460   1.7  riastrad {
   1461   1.7  riastrad 	int i;
   1462   1.7  riastrad 
   1463   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++)
   1464   1.7  riastrad 		if (adev->ip_blocks[i].version->type == type)
   1465   1.7  riastrad 			return &adev->ip_blocks[i];
   1466   1.7  riastrad 
   1467   1.7  riastrad 	return NULL;
   1468   1.7  riastrad }
   1469   1.7  riastrad 
   1470   1.7  riastrad /**
   1471   1.7  riastrad  * amdgpu_device_ip_block_version_cmp
   1472   1.7  riastrad  *
   1473   1.7  riastrad  * @adev: amdgpu_device pointer
   1474   1.7  riastrad  * @type: enum amd_ip_block_type
   1475   1.7  riastrad  * @major: major version
   1476   1.7  riastrad  * @minor: minor version
   1477   1.7  riastrad  *
   1478   1.7  riastrad  * return 0 if equal or greater
   1479   1.7  riastrad  * return 1 if smaller or the ip_block doesn't exist
   1480   1.7  riastrad  */
   1481   1.7  riastrad int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
   1482   1.7  riastrad 				       enum amd_ip_block_type type,
   1483   1.7  riastrad 				       u32 major, u32 minor)
   1484   1.7  riastrad {
   1485   1.7  riastrad 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
   1486   1.7  riastrad 
   1487   1.7  riastrad 	if (ip_block && ((ip_block->version->major > major) ||
   1488   1.7  riastrad 			((ip_block->version->major == major) &&
   1489   1.7  riastrad 			(ip_block->version->minor >= minor))))
   1490   1.7  riastrad 		return 0;
   1491   1.7  riastrad 
   1492   1.7  riastrad 	return 1;
   1493   1.7  riastrad }
   1494   1.7  riastrad 
   1495   1.7  riastrad /**
   1496   1.7  riastrad  * amdgpu_device_ip_block_add
   1497   1.7  riastrad  *
   1498   1.7  riastrad  * @adev: amdgpu_device pointer
   1499   1.7  riastrad  * @ip_block_version: pointer to the IP to add
   1500   1.7  riastrad  *
   1501   1.7  riastrad  * Adds the IP block driver information to the collection of IPs
   1502   1.7  riastrad  * on the asic.
   1503   1.7  riastrad  */
   1504   1.7  riastrad int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
   1505   1.7  riastrad 			       const struct amdgpu_ip_block_version *ip_block_version)
   1506   1.7  riastrad {
   1507   1.7  riastrad 	if (!ip_block_version)
   1508   1.7  riastrad 		return -EINVAL;
   1509   1.7  riastrad 
   1510   1.7  riastrad 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
   1511   1.7  riastrad 		  ip_block_version->funcs->name);
   1512   1.7  riastrad 
   1513   1.7  riastrad 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
   1514   1.7  riastrad 
   1515   1.7  riastrad 	return 0;
   1516   1.7  riastrad }
   1517   1.7  riastrad 
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display.  This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		/*
		 * The option string is a ';'-separated list of entries,
		 * each of the form <pci address>[,<num crtc>]; "all"
		 * matches any device.  Parse a copy, since strsep()
		 * modifies the string it walks.
		 */
		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				/* Optional crtc count follows the ','. */
				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					/* Clamp the count to [1, 6]. */
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					/* Missing/invalid count: default to 1. */
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
   1574   1.7  riastrad 
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	/*
	 * Pick the firmware name for this asic.  Chips listed before the
	 * default label have no gpu_info firmware; return success for them.
	 */
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_VEGA20:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		/* Raven has three firmware variants keyed on revision/device. */
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		chip_name = "renoir";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	}

	/* Load and validate the firmware image before touching its contents. */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/* With IP discovery the gfx config comes from the discovery
		 * table instead; only the soc bounding box is taken below. */
		if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		/* v1.1 appends per-shader-array packer counts; shadows
		 * the outer gpu_info_fw with the wider view. */
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in disocovery table,
		 * we always need to parse it from gpu info firmware.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
   1731   1.7  riastrad 
/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs.  The hardware IPs that make
 * up each asic are discovered each IP's early_init callback is run.  This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	/* Select the asic family and register its IP blocks. */
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		if (adev->asic_type == CHIP_RAVEN ||
		    adev->asic_type == CHIP_RENOIR)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case  CHIP_NAVI10:
	case  CHIP_NAVI14:
	case  CHIP_NAVI12:
		adev->family = AMDGPU_FAMILY_NV;

		r = nv_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	/* With IP discovery enabled, gfx config comes from the discovery table. */
	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
		amdgpu_discovery_get_gfx_info(adev);

	amdgpu_amdkfd_device_probe(adev);

	/* Virtual function: request full gpu access from the host first. */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	/* GFXOFF is disabled under SR-IOV and with the no-HWS scheduling policy. */
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	/* Run each IP block's early_init unless masked out by the user. */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					/* -ENOENT marks the block as absent, not an error. */
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}
		}
	}

	/* Apply the user-supplied clock/power gating masks. */
	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
   1885   1.1  riastrad 
   1886   1.7  riastrad static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
   1887   1.1  riastrad {
   1888   1.1  riastrad 	int i, r;
   1889   1.1  riastrad 
   1890   1.1  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1891   1.7  riastrad 		if (!adev->ip_blocks[i].status.sw)
   1892   1.7  riastrad 			continue;
   1893   1.7  riastrad 		if (adev->ip_blocks[i].status.hw)
   1894   1.1  riastrad 			continue;
   1895   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
   1896   1.7  riastrad 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
   1897   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
   1898   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
   1899   1.7  riastrad 			if (r) {
   1900   1.7  riastrad 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
   1901   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   1902   1.1  riastrad 				return r;
   1903   1.7  riastrad 			}
   1904   1.7  riastrad 			adev->ip_blocks[i].status.hw = true;
   1905   1.1  riastrad 		}
   1906   1.1  riastrad 	}
   1907   1.1  riastrad 
   1908   1.7  riastrad 	return 0;
   1909   1.7  riastrad }
   1910   1.7  riastrad 
   1911   1.7  riastrad static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
   1912   1.7  riastrad {
   1913   1.7  riastrad 	int i, r;
   1914   1.7  riastrad 
   1915   1.1  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1916   1.7  riastrad 		if (!adev->ip_blocks[i].status.sw)
   1917   1.1  riastrad 			continue;
   1918   1.7  riastrad 		if (adev->ip_blocks[i].status.hw)
   1919   1.1  riastrad 			continue;
   1920   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
   1921   1.7  riastrad 		if (r) {
   1922   1.7  riastrad 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
   1923   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   1924   1.1  riastrad 			return r;
   1925   1.7  riastrad 		}
   1926   1.7  riastrad 		adev->ip_blocks[i].status.hw = true;
   1927   1.1  riastrad 	}
   1928   1.1  riastrad 
   1929   1.1  riastrad 	return 0;
   1930   1.1  riastrad }
   1931   1.1  riastrad 
   1932   1.7  riastrad static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
   1933   1.1  riastrad {
   1934   1.7  riastrad 	int r = 0;
   1935   1.7  riastrad 	int i;
   1936   1.7  riastrad 	uint32_t smu_version;
   1937   1.7  riastrad 
   1938   1.7  riastrad 	if (adev->asic_type >= CHIP_VEGA10) {
   1939   1.7  riastrad 		for (i = 0; i < adev->num_ip_blocks; i++) {
   1940   1.7  riastrad 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
   1941   1.7  riastrad 				continue;
   1942   1.7  riastrad 
   1943   1.7  riastrad 			/* no need to do the fw loading again if already done*/
   1944   1.7  riastrad 			if (adev->ip_blocks[i].status.hw == true)
   1945   1.7  riastrad 				break;
   1946   1.7  riastrad 
   1947   1.7  riastrad 			if (adev->in_gpu_reset || adev->in_suspend) {
   1948   1.7  riastrad 				r = adev->ip_blocks[i].version->funcs->resume(adev);
   1949   1.7  riastrad 				if (r) {
   1950   1.7  riastrad 					DRM_ERROR("resume of IP block <%s> failed %d\n",
   1951   1.7  riastrad 							  adev->ip_blocks[i].version->funcs->name, r);
   1952   1.7  riastrad 					return r;
   1953   1.7  riastrad 				}
   1954   1.7  riastrad 			} else {
   1955   1.7  riastrad 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
   1956   1.7  riastrad 				if (r) {
   1957   1.7  riastrad 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
   1958   1.7  riastrad 							  adev->ip_blocks[i].version->funcs->name, r);
   1959   1.7  riastrad 					return r;
   1960   1.7  riastrad 				}
   1961   1.7  riastrad 			}
   1962   1.1  riastrad 
   1963   1.7  riastrad 			adev->ip_blocks[i].status.hw = true;
   1964   1.7  riastrad 			break;
   1965   1.1  riastrad 		}
   1966   1.1  riastrad 	}
   1967   1.1  riastrad 
   1968   1.7  riastrad 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
   1969   1.7  riastrad 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
   1970   1.7  riastrad 
   1971   1.7  riastrad 	return r;
   1972   1.1  riastrad }
   1973   1.1  riastrad 
   1974   1.7  riastrad /**
   1975   1.7  riastrad  * amdgpu_device_ip_init - run init for hardware IPs
   1976   1.7  riastrad  *
   1977   1.7  riastrad  * @adev: amdgpu_device pointer
   1978   1.7  riastrad  *
   1979   1.7  riastrad  * Main initialization pass for hardware IPs.  The list of all the hardware
   1980   1.7  riastrad  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
   1981   1.7  riastrad  * are run.  sw_init initializes the software state associated with each IP
   1982   1.7  riastrad  * and hw_init initializes the hardware associated with each IP.
   1983   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   1984   1.7  riastrad  */
   1985   1.7  riastrad static int amdgpu_device_ip_init(struct amdgpu_device *adev)
   1986   1.1  riastrad {
   1987   1.1  riastrad 	int i, r;
   1988   1.1  riastrad 
   1989   1.7  riastrad 	r = amdgpu_ras_init(adev);
   1990   1.7  riastrad 	if (r)
   1991   1.7  riastrad 		return r;
   1992   1.7  riastrad 
   1993   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   1994   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   1995   1.1  riastrad 			continue;
   1996   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
   1997   1.7  riastrad 		if (r) {
   1998   1.7  riastrad 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
   1999   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   2000   1.7  riastrad 			goto init_failed;
   2001   1.7  riastrad 		}
   2002   1.7  riastrad 		adev->ip_blocks[i].status.sw = true;
   2003   1.7  riastrad 
   2004   1.7  riastrad 		/* need to do gmc hw init early so we can allocate gpu mem */
   2005   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
   2006   1.7  riastrad 			r = amdgpu_device_vram_scratch_init(adev);
   2007   1.7  riastrad 			if (r) {
   2008   1.7  riastrad 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
   2009   1.7  riastrad 				goto init_failed;
   2010   1.7  riastrad 			}
   2011   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
   2012   1.7  riastrad 			if (r) {
   2013   1.7  riastrad 				DRM_ERROR("hw_init %d failed %d\n", i, r);
   2014   1.7  riastrad 				goto init_failed;
   2015   1.7  riastrad 			}
   2016   1.7  riastrad 			r = amdgpu_device_wb_init(adev);
   2017   1.7  riastrad 			if (r) {
   2018   1.7  riastrad 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
   2019   1.7  riastrad 				goto init_failed;
   2020   1.7  riastrad 			}
   2021   1.7  riastrad 			adev->ip_blocks[i].status.hw = true;
   2022   1.7  riastrad 
   2023   1.7  riastrad 			/* right after GMC hw init, we create CSA */
   2024   1.7  riastrad 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
   2025   1.7  riastrad 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
   2026   1.7  riastrad 								AMDGPU_GEM_DOMAIN_VRAM,
   2027   1.7  riastrad 								AMDGPU_CSA_SIZE);
   2028   1.7  riastrad 				if (r) {
   2029   1.7  riastrad 					DRM_ERROR("allocate CSA failed %d\n", r);
   2030   1.7  riastrad 					goto init_failed;
   2031   1.7  riastrad 				}
   2032   1.7  riastrad 			}
   2033   1.7  riastrad 		}
   2034   1.1  riastrad 	}
   2035   1.1  riastrad 
   2036   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   2037   1.7  riastrad 		amdgpu_virt_init_data_exchange(adev);
   2038   1.7  riastrad 
   2039   1.7  riastrad 	r = amdgpu_ib_pool_init(adev);
   2040   1.7  riastrad 	if (r) {
   2041   1.7  riastrad 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
   2042   1.7  riastrad 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
   2043   1.7  riastrad 		goto init_failed;
   2044   1.1  riastrad 	}
   2045   1.1  riastrad 
   2046   1.7  riastrad 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
   2047   1.7  riastrad 	if (r)
   2048   1.7  riastrad 		goto init_failed;
   2049   1.1  riastrad 
   2050   1.7  riastrad 	r = amdgpu_device_ip_hw_init_phase1(adev);
   2051   1.7  riastrad 	if (r)
   2052   1.7  riastrad 		goto init_failed;
   2053   1.1  riastrad 
   2054   1.7  riastrad 	r = amdgpu_device_fw_loading(adev);
   2055   1.7  riastrad 	if (r)
   2056   1.7  riastrad 		goto init_failed;
   2057   1.1  riastrad 
   2058   1.7  riastrad 	r = amdgpu_device_ip_hw_init_phase2(adev);
   2059   1.7  riastrad 	if (r)
   2060   1.7  riastrad 		goto init_failed;
   2061   1.1  riastrad 
   2062   1.7  riastrad 	/*
   2063   1.7  riastrad 	 * retired pages will be loaded from eeprom and reserved here,
   2064   1.7  riastrad 	 * it should be called after amdgpu_device_ip_hw_init_phase2  since
   2065   1.7  riastrad 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
   2066   1.7  riastrad 	 * for I2C communication which only true at this point.
   2067   1.7  riastrad 	 * recovery_init may fail, but it can free all resources allocated by
   2068   1.7  riastrad 	 * itself and its failure should not stop amdgpu init process.
   2069   1.7  riastrad 	 *
   2070   1.7  riastrad 	 * Note: theoretically, this should be called before all vram allocations
   2071   1.7  riastrad 	 * to protect retired page from abusing
   2072   1.7  riastrad 	 */
   2073   1.7  riastrad 	amdgpu_ras_recovery_init(adev);
   2074   1.1  riastrad 
   2075   1.7  riastrad 	if (adev->gmc.xgmi.num_physical_nodes > 1)
   2076   1.7  riastrad 		amdgpu_xgmi_add_device(adev);
   2077   1.7  riastrad 	amdgpu_amdkfd_device_init(adev);
   2078   1.7  riastrad 
   2079   1.7  riastrad init_failed:
   2080   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   2081   1.7  riastrad 		amdgpu_virt_release_full_gpu(adev, true);
   2082   1.1  riastrad 
   2083   1.7  riastrad 	return r;
   2084   1.1  riastrad }
   2085   1.1  riastrad 
   2086   1.1  riastrad /**
   2087   1.7  riastrad  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
   2088   1.1  riastrad  *
   2089   1.1  riastrad  * @adev: amdgpu_device pointer
   2090   1.1  riastrad  *
   2091   1.7  riastrad  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
   2092   1.7  riastrad  * this function before a GPU reset.  If the value is retained after a
   2093   1.7  riastrad  * GPU reset, VRAM has not been lost.  Some GPU resets may destry VRAM contents.
   2094   1.1  riastrad  */
   2095   1.7  riastrad static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
   2096   1.1  riastrad {
   2097   1.7  riastrad 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
   2098   1.7  riastrad }
   2099   1.7  riastrad 
   2100   1.7  riastrad /**
   2101   1.7  riastrad  * amdgpu_device_check_vram_lost - check if vram is valid
   2102   1.7  riastrad  *
   2103   1.7  riastrad  * @adev: amdgpu_device pointer
   2104   1.7  riastrad  *
   2105   1.7  riastrad  * Checks the reset magic value written to the gart pointer in VRAM.
   2106   1.7  riastrad  * The driver calls this after a GPU reset to see if the contents of
   2107   1.7  riastrad  * VRAM is lost or now.
   2108   1.7  riastrad  * returns true if vram is lost, false if not.
   2109   1.7  riastrad  */
   2110   1.7  riastrad static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
   2111   1.7  riastrad {
   2112   1.7  riastrad 	return !!memcmp(adev->gart.ptr, adev->reset_magic,
   2113   1.7  riastrad 			AMDGPU_RESET_MAGIC_NUM);
   2114   1.7  riastrad }
   2115   1.7  riastrad 
   2116   1.7  riastrad /**
   2117   1.7  riastrad  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
   2118   1.7  riastrad  *
   2119   1.7  riastrad  * @adev: amdgpu_device pointer
   2120   1.7  riastrad  * @state: clockgating state (gate or ungate)
   2121   1.7  riastrad  *
   2122   1.7  riastrad  * The list of all the hardware IPs that make up the asic is walked and the
   2123   1.7  riastrad  * set_clockgating_state callbacks are run.
   2124   1.7  riastrad  * Late initialization pass enabling clockgating for hardware IPs.
   2125   1.7  riastrad  * Fini or suspend, pass disabling clockgating for hardware IPs.
   2126   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2127   1.7  riastrad  */
   2128   1.7  riastrad 
   2129   1.7  riastrad static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
   2130   1.7  riastrad 						enum amd_clockgating_state state)
   2131   1.7  riastrad {
   2132   1.7  riastrad 	int i, j, r;
   2133   1.7  riastrad 
   2134   1.7  riastrad 	if (amdgpu_emu_mode == 1)
   2135   1.7  riastrad 		return 0;
   2136   1.7  riastrad 
   2137   1.7  riastrad 	for (j = 0; j < adev->num_ip_blocks; j++) {
   2138   1.7  riastrad 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
   2139   1.7  riastrad 		if (!adev->ip_blocks[i].status.late_initialized)
   2140   1.7  riastrad 			continue;
   2141   1.7  riastrad 		/* skip CG for VCE/UVD, it's handled specially */
   2142   1.7  riastrad 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
   2143   1.7  riastrad 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
   2144   1.7  riastrad 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
   2145   1.7  riastrad 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
   2146   1.7  riastrad 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
   2147   1.7  riastrad 			/* enable clockgating to save power */
   2148   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
   2149   1.7  riastrad 										     state);
   2150   1.7  riastrad 			if (r) {
   2151   1.7  riastrad 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
   2152   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   2153   1.7  riastrad 				return r;
   2154   1.7  riastrad 			}
   2155   1.7  riastrad 		}
   2156   1.7  riastrad 	}
   2157   1.7  riastrad 
   2158   1.7  riastrad 	return 0;
   2159   1.7  riastrad }
   2160   1.7  riastrad 
   2161   1.7  riastrad static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
   2162   1.7  riastrad {
   2163   1.7  riastrad 	int i, j, r;
   2164   1.7  riastrad 
   2165   1.7  riastrad 	if (amdgpu_emu_mode == 1)
   2166   1.7  riastrad 		return 0;
   2167   1.7  riastrad 
   2168   1.7  riastrad 	for (j = 0; j < adev->num_ip_blocks; j++) {
   2169   1.7  riastrad 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
   2170   1.7  riastrad 		if (!adev->ip_blocks[i].status.late_initialized)
   2171   1.7  riastrad 			continue;
   2172   1.7  riastrad 		/* skip CG for VCE/UVD, it's handled specially */
   2173   1.7  riastrad 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
   2174   1.7  riastrad 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
   2175   1.7  riastrad 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
   2176   1.7  riastrad 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
   2177   1.7  riastrad 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
   2178   1.7  riastrad 			/* enable powergating to save power */
   2179   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
   2180   1.7  riastrad 											state);
   2181   1.7  riastrad 			if (r) {
   2182   1.7  riastrad 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
   2183   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   2184   1.7  riastrad 				return r;
   2185   1.7  riastrad 			}
   2186   1.7  riastrad 		}
   2187   1.7  riastrad 	}
   2188   1.7  riastrad 	return 0;
   2189   1.7  riastrad }
   2190   1.7  riastrad 
   2191   1.7  riastrad static int amdgpu_device_enable_mgpu_fan_boost(void)
   2192   1.7  riastrad {
   2193   1.7  riastrad 	struct amdgpu_gpu_instance *gpu_ins;
   2194   1.7  riastrad 	struct amdgpu_device *adev;
   2195   1.7  riastrad 	int i, ret = 0;
   2196   1.7  riastrad 
   2197   1.7  riastrad 	mutex_lock(&mgpu_info.mutex);
   2198   1.7  riastrad 
   2199   1.7  riastrad 	/*
   2200   1.7  riastrad 	 * MGPU fan boost feature should be enabled
   2201   1.7  riastrad 	 * only when there are two or more dGPUs in
   2202   1.7  riastrad 	 * the system
   2203   1.7  riastrad 	 */
   2204   1.7  riastrad 	if (mgpu_info.num_dgpu < 2)
   2205   1.7  riastrad 		goto out;
   2206   1.7  riastrad 
   2207   1.7  riastrad 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
   2208   1.7  riastrad 		gpu_ins = &(mgpu_info.gpu_ins[i]);
   2209   1.7  riastrad 		adev = gpu_ins->adev;
   2210   1.7  riastrad 		if (!(adev->flags & AMD_IS_APU) &&
   2211   1.7  riastrad 		    !gpu_ins->mgpu_fan_enabled &&
   2212   1.7  riastrad 		    adev->powerplay.pp_funcs &&
   2213   1.7  riastrad 		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
   2214   1.7  riastrad 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
   2215   1.7  riastrad 			if (ret)
   2216   1.7  riastrad 				break;
   2217   1.7  riastrad 
   2218   1.7  riastrad 			gpu_ins->mgpu_fan_enabled = 1;
   2219   1.7  riastrad 		}
   2220   1.7  riastrad 	}
   2221   1.7  riastrad 
   2222   1.7  riastrad out:
   2223   1.7  riastrad 	mutex_unlock(&mgpu_info.mutex);
   2224   1.7  riastrad 
   2225   1.7  riastrad 	return ret;
   2226   1.7  riastrad }
   2227   1.7  riastrad 
   2228   1.7  riastrad /**
   2229   1.7  riastrad  * amdgpu_device_ip_late_init - run late init for hardware IPs
   2230   1.7  riastrad  *
   2231   1.7  riastrad  * @adev: amdgpu_device pointer
   2232   1.7  riastrad  *
   2233   1.7  riastrad  * Late initialization pass for hardware IPs.  The list of all the hardware
   2234   1.7  riastrad  * IPs that make up the asic is walked and the late_init callbacks are run.
   2235   1.7  riastrad  * late_init covers any special initialization that an IP requires
   2236   1.7  riastrad  * after all of the have been initialized or something that needs to happen
   2237   1.7  riastrad  * late in the init process.
   2238   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2239   1.7  riastrad  */
   2240   1.7  riastrad static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
   2241   1.7  riastrad {
   2242   1.7  riastrad 	struct amdgpu_gpu_instance *gpu_instance;
   2243   1.7  riastrad 	int i = 0, r;
   2244   1.7  riastrad 
   2245   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   2246   1.7  riastrad 		if (!adev->ip_blocks[i].status.hw)
   2247   1.7  riastrad 			continue;
   2248   1.7  riastrad 		if (adev->ip_blocks[i].version->funcs->late_init) {
   2249   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
   2250   1.7  riastrad 			if (r) {
   2251   1.7  riastrad 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
   2252   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   2253   1.7  riastrad 				return r;
   2254   1.7  riastrad 			}
   2255   1.7  riastrad 		}
   2256   1.7  riastrad 		adev->ip_blocks[i].status.late_initialized = true;
   2257   1.7  riastrad 	}
   2258   1.7  riastrad 
   2259   1.7  riastrad 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
   2260   1.7  riastrad 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
   2261   1.7  riastrad 
   2262   1.7  riastrad 	amdgpu_device_fill_reset_magic(adev);
   2263   1.7  riastrad 
   2264   1.7  riastrad 	r = amdgpu_device_enable_mgpu_fan_boost();
   2265   1.7  riastrad 	if (r)
   2266   1.7  riastrad 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
   2267   1.7  riastrad 
   2268   1.7  riastrad 
   2269   1.7  riastrad 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
   2270   1.7  riastrad 		mutex_lock(&mgpu_info.mutex);
   2271   1.7  riastrad 
   2272   1.7  riastrad 		/*
   2273   1.7  riastrad 		 * Reset device p-state to low as this was booted with high.
   2274   1.7  riastrad 		 *
   2275   1.7  riastrad 		 * This should be performed only after all devices from the same
   2276   1.7  riastrad 		 * hive get initialized.
   2277   1.7  riastrad 		 *
   2278   1.7  riastrad 		 * However, it's unknown how many device in the hive in advance.
   2279   1.7  riastrad 		 * As this is counted one by one during devices initializations.
   2280   1.7  riastrad 		 *
   2281   1.7  riastrad 		 * So, we wait for all XGMI interlinked devices initialized.
   2282   1.7  riastrad 		 * This may bring some delays as those devices may come from
   2283   1.7  riastrad 		 * different hives. But that should be OK.
   2284   1.7  riastrad 		 */
   2285   1.7  riastrad 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
   2286   1.7  riastrad 			for (i = 0; i < mgpu_info.num_gpu; i++) {
   2287   1.7  riastrad 				gpu_instance = &(mgpu_info.gpu_ins[i]);
   2288   1.7  riastrad 				if (gpu_instance->adev->flags & AMD_IS_APU)
   2289   1.7  riastrad 					continue;
   2290   1.7  riastrad 
   2291   1.7  riastrad 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
   2292   1.7  riastrad 				if (r) {
   2293   1.7  riastrad 					DRM_ERROR("pstate setting failed (%d).\n", r);
   2294   1.7  riastrad 					break;
   2295   1.7  riastrad 				}
   2296   1.7  riastrad 			}
   2297   1.7  riastrad 		}
   2298   1.7  riastrad 
   2299   1.7  riastrad 		mutex_unlock(&mgpu_info.mutex);
   2300   1.7  riastrad 	}
   2301   1.7  riastrad 
   2302   1.7  riastrad 	return 0;
   2303   1.7  riastrad }
   2304   1.7  riastrad 
   2305   1.7  riastrad /**
   2306   1.7  riastrad  * amdgpu_device_ip_fini - run fini for hardware IPs
   2307   1.7  riastrad  *
   2308   1.7  riastrad  * @adev: amdgpu_device pointer
   2309   1.7  riastrad  *
   2310   1.7  riastrad  * Main teardown pass for hardware IPs.  The list of all the hardware
   2311   1.7  riastrad  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
   2312   1.7  riastrad  * are run.  hw_fini tears down the hardware associated with each IP
   2313   1.7  riastrad  * and sw_fini tears down any software state associated with each IP.
   2314   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2315   1.7  riastrad  */
   2316   1.7  riastrad static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
   2317   1.7  riastrad {
   2318   1.7  riastrad 	int i, r;
   2319   1.7  riastrad 
   2320   1.7  riastrad 	amdgpu_ras_pre_fini(adev);
   2321   1.7  riastrad 
   2322   1.7  riastrad 	if (adev->gmc.xgmi.num_physical_nodes > 1)
   2323   1.7  riastrad 		amdgpu_xgmi_remove_device(adev);
   2324   1.7  riastrad 
   2325   1.7  riastrad 	amdgpu_amdkfd_device_fini(adev);
   2326   1.7  riastrad 
   2327   1.7  riastrad 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
   2328   1.7  riastrad 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
   2329   1.7  riastrad 
   2330   1.7  riastrad 	/* need to disable SMC first */
   2331   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   2332   1.7  riastrad 		if (!adev->ip_blocks[i].status.hw)
   2333   1.7  riastrad 			continue;
   2334   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
   2335   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
   2336   1.7  riastrad 			/* XXX handle errors */
   2337   1.7  riastrad 			if (r) {
   2338   1.7  riastrad 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
   2339   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   2340   1.7  riastrad 			}
   2341   1.7  riastrad 			adev->ip_blocks[i].status.hw = false;
   2342   1.7  riastrad 			break;
   2343   1.7  riastrad 		}
   2344   1.7  riastrad 	}
   2345   1.7  riastrad 
   2346   1.7  riastrad 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
   2347   1.7  riastrad 		if (!adev->ip_blocks[i].status.hw)
   2348   1.7  riastrad 			continue;
   2349   1.7  riastrad 
   2350   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
   2351   1.7  riastrad 		/* XXX handle errors */
   2352   1.7  riastrad 		if (r) {
   2353   1.7  riastrad 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
   2354   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   2355   1.7  riastrad 		}
   2356   1.7  riastrad 
   2357   1.7  riastrad 		adev->ip_blocks[i].status.hw = false;
   2358   1.7  riastrad 	}
   2359   1.7  riastrad 
   2360   1.7  riastrad 
   2361   1.7  riastrad 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
   2362   1.7  riastrad 		if (!adev->ip_blocks[i].status.sw)
   2363   1.7  riastrad 			continue;
   2364   1.7  riastrad 
   2365   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
   2366   1.7  riastrad 			amdgpu_ucode_free_bo(adev);
   2367   1.7  riastrad 			amdgpu_free_static_csa(&adev->virt.csa_obj);
   2368   1.7  riastrad 			amdgpu_device_wb_fini(adev);
   2369   1.7  riastrad 			amdgpu_device_vram_scratch_fini(adev);
   2370   1.7  riastrad 			amdgpu_ib_pool_fini(adev);
   2371   1.7  riastrad 		}
   2372   1.7  riastrad 
   2373   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
   2374   1.7  riastrad 		/* XXX handle errors */
   2375   1.7  riastrad 		if (r) {
   2376   1.7  riastrad 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
   2377   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   2378   1.7  riastrad 		}
   2379   1.7  riastrad 		adev->ip_blocks[i].status.sw = false;
   2380   1.7  riastrad 		adev->ip_blocks[i].status.valid = false;
   2381   1.7  riastrad 	}
   2382   1.7  riastrad 
   2383   1.7  riastrad 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
   2384   1.7  riastrad 		if (!adev->ip_blocks[i].status.late_initialized)
   2385   1.7  riastrad 			continue;
   2386   1.7  riastrad 		if (adev->ip_blocks[i].version->funcs->late_fini)
   2387   1.7  riastrad 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
   2388   1.7  riastrad 		adev->ip_blocks[i].status.late_initialized = false;
   2389   1.7  riastrad 	}
   2390   1.7  riastrad 
   2391   1.7  riastrad 	amdgpu_ras_fini(adev);
   2392   1.7  riastrad 
   2393   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   2394   1.7  riastrad 		if (amdgpu_virt_release_full_gpu(adev, false))
   2395   1.7  riastrad 			DRM_ERROR("failed to release exclusive mode on fini\n");
   2396   1.7  riastrad 
   2397   1.7  riastrad 	return 0;
   2398   1.7  riastrad }
   2399   1.7  riastrad 
   2400   1.7  riastrad /**
   2401   1.7  riastrad  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
   2402   1.7  riastrad  *
   2403   1.7  riastrad  * @work: work_struct.
   2404   1.7  riastrad  */
   2405   1.7  riastrad static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
   2406   1.7  riastrad {
   2407   1.7  riastrad 	struct amdgpu_device *adev =
   2408   1.7  riastrad 		container_of(work, struct amdgpu_device, delayed_init_work.work);
   2409   1.7  riastrad 	int r;
   2410   1.7  riastrad 
   2411   1.7  riastrad 	r = amdgpu_ib_ring_tests(adev);
   2412   1.7  riastrad 	if (r)
   2413   1.7  riastrad 		DRM_ERROR("ib ring test failed (%d).\n", r);
   2414   1.7  riastrad }
   2415   1.7  riastrad 
   2416   1.7  riastrad static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
   2417   1.7  riastrad {
   2418   1.7  riastrad 	struct amdgpu_device *adev =
   2419   1.7  riastrad 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
   2420   1.7  riastrad 
   2421   1.7  riastrad 	mutex_lock(&adev->gfx.gfx_off_mutex);
   2422   1.7  riastrad 	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
   2423   1.7  riastrad 		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
   2424   1.7  riastrad 			adev->gfx.gfx_off_state = true;
   2425   1.7  riastrad 	}
   2426   1.7  riastrad 	mutex_unlock(&adev->gfx.gfx_off_mutex);
   2427   1.7  riastrad }
   2428   1.7  riastrad 
   2429   1.7  riastrad /**
   2430   1.7  riastrad  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
   2431   1.7  riastrad  *
   2432   1.7  riastrad  * @adev: amdgpu_device pointer
   2433   1.7  riastrad  *
   2434   1.7  riastrad  * Main suspend function for hardware IPs.  The list of all the hardware
   2435   1.7  riastrad  * IPs that make up the asic is walked, clockgating is disabled and the
   2436   1.7  riastrad  * suspend callbacks are run.  suspend puts the hardware and software state
   2437   1.7  riastrad  * in each IP into a state suitable for suspend.
   2438   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2439   1.7  riastrad  */
   2440   1.7  riastrad static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
   2441   1.7  riastrad {
   2442   1.7  riastrad 	int i, r;
   2443   1.7  riastrad 
   2444   1.7  riastrad 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
   2445   1.7  riastrad 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
   2446   1.7  riastrad 
   2447   1.7  riastrad 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
   2448   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   2449   1.7  riastrad 			continue;
   2450   1.7  riastrad 		/* displays are handled separately */
   2451   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
   2452   1.7  riastrad 			/* XXX handle errors */
   2453   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->suspend(adev);
   2454   1.7  riastrad 			/* XXX handle errors */
   2455   1.7  riastrad 			if (r) {
   2456   1.7  riastrad 				DRM_ERROR("suspend of IP block <%s> failed %d\n",
   2457   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   2458   1.7  riastrad 				return r;
   2459   1.7  riastrad 			}
   2460   1.7  riastrad 			adev->ip_blocks[i].status.hw = false;
   2461   1.7  riastrad 		}
   2462   1.7  riastrad 	}
   2463   1.7  riastrad 
   2464   1.7  riastrad 	return 0;
   2465   1.7  riastrad }
   2466   1.7  riastrad 
   2467   1.7  riastrad /**
   2468   1.7  riastrad  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
   2469   1.7  riastrad  *
   2470   1.7  riastrad  * @adev: amdgpu_device pointer
   2471   1.7  riastrad  *
   2472   1.7  riastrad  * Main suspend function for hardware IPs.  The list of all the hardware
   2473   1.7  riastrad  * IPs that make up the asic is walked, clockgating is disabled and the
   2474   1.7  riastrad  * suspend callbacks are run.  suspend puts the hardware and software state
   2475   1.7  riastrad  * in each IP into a state suitable for suspend.
   2476   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2477   1.7  riastrad  */
   2478   1.7  riastrad static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
   2479   1.7  riastrad {
   2480   1.7  riastrad 	int i, r __unused;
   2481   1.7  riastrad 
   2482   1.7  riastrad 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
   2483   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   2484   1.7  riastrad 			continue;
   2485   1.7  riastrad 		/* displays are handled in phase1 */
   2486   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
   2487   1.7  riastrad 			continue;
   2488   1.7  riastrad 		/* PSP lost connection when err_event_athub occurs */
   2489   1.7  riastrad 		if (amdgpu_ras_intr_triggered() &&
   2490   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
   2491   1.7  riastrad 			adev->ip_blocks[i].status.hw = false;
   2492   1.7  riastrad 			continue;
   2493   1.7  riastrad 		}
   2494   1.7  riastrad 		/* XXX handle errors */
   2495   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
   2496   1.7  riastrad 		/* XXX handle errors */
   2497   1.7  riastrad 		if (r) {
   2498   1.7  riastrad 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
   2499   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   2500   1.7  riastrad 		}
   2501   1.7  riastrad 		adev->ip_blocks[i].status.hw = false;
   2502   1.7  riastrad 		/* handle putting the SMC in the appropriate state */
   2503   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
   2504   1.7  riastrad 			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
   2505   1.7  riastrad 			if (r) {
   2506   1.7  riastrad 				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
   2507   1.7  riastrad 					  adev->mp1_state, r);
   2508   1.7  riastrad 				return r;
   2509   1.7  riastrad 			}
   2510   1.7  riastrad 		}
   2511   1.7  riastrad 
   2512   1.7  riastrad 		adev->ip_blocks[i].status.hw = false;
   2513   1.7  riastrad 	}
   2514   1.7  riastrad 
   2515   1.7  riastrad 	return 0;
   2516   1.7  riastrad }
   2517   1.7  riastrad 
   2518   1.7  riastrad /**
   2519   1.7  riastrad  * amdgpu_device_ip_suspend - run suspend for hardware IPs
   2520   1.7  riastrad  *
   2521   1.7  riastrad  * @adev: amdgpu_device pointer
   2522   1.7  riastrad  *
   2523   1.7  riastrad  * Main suspend function for hardware IPs.  The list of all the hardware
   2524   1.7  riastrad  * IPs that make up the asic is walked, clockgating is disabled and the
   2525   1.7  riastrad  * suspend callbacks are run.  suspend puts the hardware and software state
   2526   1.7  riastrad  * in each IP into a state suitable for suspend.
   2527   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2528   1.7  riastrad  */
   2529   1.7  riastrad int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
   2530   1.7  riastrad {
   2531   1.7  riastrad 	int r;
   2532   1.7  riastrad 
   2533   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   2534   1.7  riastrad 		amdgpu_virt_request_full_gpu(adev, false);
   2535   1.7  riastrad 
   2536   1.7  riastrad 	r = amdgpu_device_ip_suspend_phase1(adev);
   2537   1.7  riastrad 	if (r)
   2538   1.7  riastrad 		return r;
   2539   1.7  riastrad 	r = amdgpu_device_ip_suspend_phase2(adev);
   2540   1.7  riastrad 
   2541   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   2542   1.7  riastrad 		amdgpu_virt_release_full_gpu(adev, false);
   2543   1.7  riastrad 
   2544   1.7  riastrad 	return r;
   2545   1.7  riastrad }
   2546   1.7  riastrad 
   2547   1.7  riastrad static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
   2548   1.7  riastrad {
   2549   1.7  riastrad 	int i, r;
   2550   1.7  riastrad 
   2551   1.7  riastrad 	static enum amd_ip_block_type ip_order[] = {
   2552   1.7  riastrad 		AMD_IP_BLOCK_TYPE_GMC,
   2553   1.7  riastrad 		AMD_IP_BLOCK_TYPE_COMMON,
   2554   1.7  riastrad 		AMD_IP_BLOCK_TYPE_PSP,
   2555   1.7  riastrad 		AMD_IP_BLOCK_TYPE_IH,
   2556   1.7  riastrad 	};
   2557   1.7  riastrad 
   2558   1.7  riastrad 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
   2559   1.7  riastrad 		int j;
   2560   1.7  riastrad 		struct amdgpu_ip_block *block;
   2561   1.7  riastrad 
   2562   1.7  riastrad 		for (j = 0; j < adev->num_ip_blocks; j++) {
   2563   1.7  riastrad 			block = &adev->ip_blocks[j];
   2564   1.7  riastrad 
   2565   1.7  riastrad 			block->status.hw = false;
   2566   1.7  riastrad 			if (block->version->type != ip_order[i] ||
   2567   1.7  riastrad 				!block->status.valid)
   2568   1.7  riastrad 				continue;
   2569   1.7  riastrad 
   2570   1.7  riastrad 			r = block->version->funcs->hw_init(adev);
   2571   1.7  riastrad 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
   2572   1.7  riastrad 			if (r)
   2573   1.7  riastrad 				return r;
   2574   1.7  riastrad 			block->status.hw = true;
   2575   1.7  riastrad 		}
   2576   1.7  riastrad 	}
   2577   1.7  riastrad 
   2578   1.7  riastrad 	return 0;
   2579   1.7  riastrad }
   2580   1.7  riastrad 
   2581   1.7  riastrad static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
   2582   1.7  riastrad {
   2583   1.7  riastrad 	int i, r;
   2584   1.7  riastrad 
   2585   1.7  riastrad 	static enum amd_ip_block_type ip_order[] = {
   2586   1.7  riastrad 		AMD_IP_BLOCK_TYPE_SMC,
   2587   1.7  riastrad 		AMD_IP_BLOCK_TYPE_DCE,
   2588   1.7  riastrad 		AMD_IP_BLOCK_TYPE_GFX,
   2589   1.7  riastrad 		AMD_IP_BLOCK_TYPE_SDMA,
   2590   1.7  riastrad 		AMD_IP_BLOCK_TYPE_UVD,
   2591   1.7  riastrad 		AMD_IP_BLOCK_TYPE_VCE,
   2592   1.7  riastrad 		AMD_IP_BLOCK_TYPE_VCN
   2593   1.7  riastrad 	};
   2594   1.7  riastrad 
   2595   1.7  riastrad 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
   2596   1.7  riastrad 		int j;
   2597   1.7  riastrad 		struct amdgpu_ip_block *block;
   2598   1.7  riastrad 
   2599   1.7  riastrad 		for (j = 0; j < adev->num_ip_blocks; j++) {
   2600   1.7  riastrad 			block = &adev->ip_blocks[j];
   2601   1.7  riastrad 
   2602   1.7  riastrad 			if (block->version->type != ip_order[i] ||
   2603   1.7  riastrad 				!block->status.valid ||
   2604   1.7  riastrad 				block->status.hw)
   2605   1.7  riastrad 				continue;
   2606   1.7  riastrad 
   2607   1.7  riastrad 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
   2608   1.7  riastrad 				r = block->version->funcs->resume(adev);
   2609   1.7  riastrad 			else
   2610   1.7  riastrad 				r = block->version->funcs->hw_init(adev);
   2611   1.7  riastrad 
   2612   1.7  riastrad 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
   2613   1.7  riastrad 			if (r)
   2614   1.7  riastrad 				return r;
   2615   1.7  riastrad 			block->status.hw = true;
   2616   1.7  riastrad 		}
   2617   1.7  riastrad 	}
   2618   1.7  riastrad 
   2619   1.7  riastrad 	return 0;
   2620   1.7  riastrad }
   2621   1.7  riastrad 
   2622   1.7  riastrad /**
   2623   1.7  riastrad  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
   2624   1.7  riastrad  *
   2625   1.7  riastrad  * @adev: amdgpu_device pointer
   2626   1.7  riastrad  *
   2627   1.7  riastrad  * First resume function for hardware IPs.  The list of all the hardware
   2628   1.7  riastrad  * IPs that make up the asic is walked and the resume callbacks are run for
   2629   1.7  riastrad  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
   2630   1.7  riastrad  * after a suspend and updates the software state as necessary.  This
   2631   1.7  riastrad  * function is also used for restoring the GPU after a GPU reset.
   2632   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2633   1.7  riastrad  */
   2634   1.7  riastrad static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
   2635   1.7  riastrad {
   2636   1.7  riastrad 	int i, r;
   2637   1.7  riastrad 
   2638   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   2639   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
   2640   1.7  riastrad 			continue;
   2641   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
   2642   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
   2643   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
   2644   1.7  riastrad 
   2645   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->resume(adev);
   2646   1.7  riastrad 			if (r) {
   2647   1.7  riastrad 				DRM_ERROR("resume of IP block <%s> failed %d\n",
   2648   1.7  riastrad 					  adev->ip_blocks[i].version->funcs->name, r);
   2649   1.7  riastrad 				return r;
   2650   1.7  riastrad 			}
   2651   1.7  riastrad 			adev->ip_blocks[i].status.hw = true;
   2652   1.7  riastrad 		}
   2653   1.7  riastrad 	}
   2654   1.7  riastrad 
   2655   1.7  riastrad 	return 0;
   2656   1.7  riastrad }
   2657   1.7  riastrad 
   2658   1.7  riastrad /**
   2659   1.7  riastrad  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
   2660   1.7  riastrad  *
   2661   1.7  riastrad  * @adev: amdgpu_device pointer
   2662   1.7  riastrad  *
   2663   1.7  riastrad  * First resume function for hardware IPs.  The list of all the hardware
   2664   1.7  riastrad  * IPs that make up the asic is walked and the resume callbacks are run for
   2665   1.7  riastrad  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
   2666   1.7  riastrad  * functional state after a suspend and updates the software state as
   2667   1.7  riastrad  * necessary.  This function is also used for restoring the GPU after a GPU
   2668   1.7  riastrad  * reset.
   2669   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2670   1.7  riastrad  */
   2671   1.7  riastrad static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
   2672   1.7  riastrad {
   2673   1.7  riastrad 	int i, r;
   2674   1.7  riastrad 
   2675   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   2676   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
   2677   1.7  riastrad 			continue;
   2678   1.7  riastrad 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
   2679   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
   2680   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
   2681   1.7  riastrad 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
   2682   1.7  riastrad 			continue;
   2683   1.7  riastrad 		r = adev->ip_blocks[i].version->funcs->resume(adev);
   2684   1.7  riastrad 		if (r) {
   2685   1.7  riastrad 			DRM_ERROR("resume of IP block <%s> failed %d\n",
   2686   1.7  riastrad 				  adev->ip_blocks[i].version->funcs->name, r);
   2687   1.7  riastrad 			return r;
   2688   1.7  riastrad 		}
   2689   1.7  riastrad 		adev->ip_blocks[i].status.hw = true;
   2690   1.7  riastrad 	}
   2691   1.7  riastrad 
   2692   1.7  riastrad 	return 0;
   2693   1.7  riastrad }
   2694   1.7  riastrad 
   2695   1.7  riastrad /**
   2696   1.7  riastrad  * amdgpu_device_ip_resume - run resume for hardware IPs
   2697   1.7  riastrad  *
   2698   1.7  riastrad  * @adev: amdgpu_device pointer
   2699   1.7  riastrad  *
   2700   1.7  riastrad  * Main resume function for hardware IPs.  The hardware IPs
   2701   1.7  riastrad  * are split into two resume functions because they are
   2702   1.7  riastrad  * are also used in in recovering from a GPU reset and some additional
   2703   1.7  riastrad  * steps need to be take between them.  In this case (S3/S4) they are
   2704   1.7  riastrad  * run sequentially.
   2705   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   2706   1.7  riastrad  */
   2707   1.7  riastrad static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
   2708   1.7  riastrad {
   2709   1.7  riastrad 	int r;
   2710   1.7  riastrad 
   2711   1.7  riastrad 	r = amdgpu_device_ip_resume_phase1(adev);
   2712   1.7  riastrad 	if (r)
   2713   1.7  riastrad 		return r;
   2714   1.7  riastrad 
   2715   1.7  riastrad 	r = amdgpu_device_fw_loading(adev);
   2716   1.7  riastrad 	if (r)
   2717   1.7  riastrad 		return r;
   2718   1.7  riastrad 
   2719   1.7  riastrad 	r = amdgpu_device_ip_resume_phase2(adev);
   2720   1.7  riastrad 
   2721   1.7  riastrad 	return r;
   2722   1.7  riastrad }
   2723   1.7  riastrad 
   2724   1.7  riastrad /**
   2725   1.7  riastrad  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
   2726   1.7  riastrad  *
   2727   1.7  riastrad  * @adev: amdgpu_device pointer
   2728   1.7  riastrad  *
   2729   1.7  riastrad  * Query the VBIOS data tables to determine if the board supports SR-IOV.
   2730   1.7  riastrad  */
   2731   1.7  riastrad static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
   2732   1.7  riastrad {
   2733   1.7  riastrad 	if (amdgpu_sriov_vf(adev)) {
   2734   1.7  riastrad 		if (adev->is_atom_fw) {
   2735   1.7  riastrad 			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
   2736   1.7  riastrad 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
   2737   1.7  riastrad 		} else {
   2738   1.7  riastrad 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
   2739   1.7  riastrad 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
   2740   1.7  riastrad 		}
   2741   1.7  riastrad 
   2742   1.7  riastrad 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
   2743   1.7  riastrad 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
   2744   1.7  riastrad 	}
   2745   1.7  riastrad }
   2746   1.7  riastrad 
   2747   1.7  riastrad /**
   2748   1.7  riastrad  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
   2749   1.7  riastrad  *
   2750   1.7  riastrad  * @asic_type: AMD asic type
   2751   1.7  riastrad  *
   2752   1.7  riastrad  * Check if there is DC (new modesetting infrastructre) support for an asic.
   2753   1.7  riastrad  * returns true if DC has support, false if not.
   2754   1.7  riastrad  */
   2755   1.7  riastrad bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
   2756   1.7  riastrad {
   2757   1.7  riastrad 	switch (asic_type) {
   2758   1.7  riastrad #if defined(CONFIG_DRM_AMD_DC)
   2759   1.7  riastrad 	case CHIP_BONAIRE:
   2760   1.7  riastrad 	case CHIP_KAVERI:
   2761   1.7  riastrad 	case CHIP_KABINI:
   2762   1.7  riastrad 	case CHIP_MULLINS:
   2763   1.7  riastrad 		/*
   2764   1.7  riastrad 		 * We have systems in the wild with these ASICs that require
   2765   1.7  riastrad 		 * LVDS and VGA support which is not supported with DC.
   2766   1.7  riastrad 		 *
   2767   1.7  riastrad 		 * Fallback to the non-DC driver here by default so as not to
   2768   1.7  riastrad 		 * cause regressions.
   2769   1.7  riastrad 		 */
   2770   1.7  riastrad 		return amdgpu_dc > 0;
   2771   1.7  riastrad 	case CHIP_HAWAII:
   2772   1.7  riastrad 	case CHIP_CARRIZO:
   2773   1.7  riastrad 	case CHIP_STONEY:
   2774   1.7  riastrad 	case CHIP_POLARIS10:
   2775   1.7  riastrad 	case CHIP_POLARIS11:
   2776   1.7  riastrad 	case CHIP_POLARIS12:
   2777   1.7  riastrad 	case CHIP_VEGAM:
   2778   1.7  riastrad 	case CHIP_TONGA:
   2779   1.7  riastrad 	case CHIP_FIJI:
   2780   1.7  riastrad 	case CHIP_VEGA10:
   2781   1.7  riastrad 	case CHIP_VEGA12:
   2782   1.7  riastrad 	case CHIP_VEGA20:
   2783   1.7  riastrad #if defined(CONFIG_DRM_AMD_DC_DCN)
   2784   1.7  riastrad 	case CHIP_RAVEN:
   2785   1.7  riastrad 	case CHIP_NAVI10:
   2786   1.7  riastrad 	case CHIP_NAVI14:
   2787   1.7  riastrad 	case CHIP_NAVI12:
   2788   1.7  riastrad 	case CHIP_RENOIR:
   2789   1.7  riastrad #endif
   2790   1.7  riastrad 		return amdgpu_dc != 0;
   2791   1.7  riastrad #endif
   2792   1.7  riastrad 	default:
   2793   1.7  riastrad 		if (amdgpu_dc > 0)
   2794   1.7  riastrad 			DRM_INFO("Display Core has been requested via kernel parameter "
   2795   1.7  riastrad 					 "but isn't supported by ASIC, ignoring\n");
   2796   1.7  riastrad 		return false;
   2797   1.7  riastrad 	}
   2798   1.7  riastrad }
   2799   1.7  riastrad 
   2800   1.7  riastrad /**
   2801   1.7  riastrad  * amdgpu_device_has_dc_support - check if dc is supported
   2802   1.7  riastrad  *
   2803   1.7  riastrad  * @adev: amdgpu_device_pointer
   2804   1.7  riastrad  *
   2805   1.7  riastrad  * Returns true for supported, false for not supported
   2806   1.7  riastrad  */
   2807   1.7  riastrad bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
   2808   1.7  riastrad {
   2809   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   2810   1.7  riastrad 		return false;
   2811   1.7  riastrad 
   2812   1.7  riastrad 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
   2813   1.7  riastrad }
   2814   1.7  riastrad 
   2815   1.7  riastrad 
   2816   1.7  riastrad static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
   2817   1.7  riastrad {
   2818   1.7  riastrad 	struct amdgpu_device *adev =
   2819   1.7  riastrad 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
   2820   1.7  riastrad 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
   2821   1.7  riastrad 
   2822   1.7  riastrad 	/* It's a bug to not have a hive within this function */
   2823   1.7  riastrad 	if (WARN_ON(!hive))
   2824   1.7  riastrad 		return;
   2825   1.7  riastrad 
   2826   1.7  riastrad 	/*
   2827   1.7  riastrad 	 * Use task barrier to synchronize all xgmi reset works across the
   2828   1.7  riastrad 	 * hive. task_barrier_enter and task_barrier_exit will block
   2829   1.7  riastrad 	 * until all the threads running the xgmi reset works reach
   2830   1.7  riastrad 	 * those points. task_barrier_full will do both blocks.
   2831   1.7  riastrad 	 */
   2832   1.7  riastrad 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
   2833   1.7  riastrad 
   2834   1.7  riastrad 		task_barrier_enter(&hive->tb);
   2835   1.7  riastrad 		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
   2836   1.7  riastrad 
   2837   1.7  riastrad 		if (adev->asic_reset_res)
   2838   1.7  riastrad 			goto fail;
   2839   1.7  riastrad 
   2840   1.7  riastrad 		task_barrier_exit(&hive->tb);
   2841   1.7  riastrad 		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
   2842   1.7  riastrad 
   2843   1.7  riastrad 		if (adev->asic_reset_res)
   2844   1.7  riastrad 			goto fail;
   2845   1.7  riastrad 	} else {
   2846   1.7  riastrad 
   2847   1.7  riastrad 		task_barrier_full(&hive->tb);
   2848   1.7  riastrad 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
   2849   1.7  riastrad 	}
   2850   1.7  riastrad 
   2851   1.7  riastrad fail:
   2852   1.7  riastrad 	if (adev->asic_reset_res)
   2853   1.7  riastrad 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
   2854   1.7  riastrad 			 adev->asic_reset_res, adev->ddev->unique);
   2855   1.7  riastrad }
   2856   1.7  riastrad 
   2857   1.7  riastrad static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
   2858   1.7  riastrad {
   2859   1.7  riastrad 	char *input = amdgpu_lockup_timeout;
   2860   1.7  riastrad 	char *timeout_setting = NULL;
   2861   1.7  riastrad 	int index = 0;
   2862   1.7  riastrad 	long timeout;
   2863   1.7  riastrad 	int ret = 0;
   2864   1.7  riastrad 
   2865   1.7  riastrad 	/*
   2866   1.7  riastrad 	 * By default timeout for non compute jobs is 10000.
   2867   1.7  riastrad 	 * And there is no timeout enforced on compute jobs.
   2868   1.7  riastrad 	 * In SR-IOV or passthrough mode, timeout for compute
   2869   1.7  riastrad 	 * jobs are 10000 by default.
   2870   1.7  riastrad 	 */
   2871   1.7  riastrad 	adev->gfx_timeout = msecs_to_jiffies(10000);
   2872   1.7  riastrad 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
   2873   1.7  riastrad 	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
   2874   1.7  riastrad 		adev->compute_timeout = adev->gfx_timeout;
   2875   1.7  riastrad 	else
   2876   1.7  riastrad 		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
   2877   1.7  riastrad 
   2878   1.7  riastrad 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
   2879   1.7  riastrad 		while ((timeout_setting = strsep(&input, ",")) &&
   2880   1.7  riastrad 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
   2881   1.7  riastrad 			ret = kstrtol(timeout_setting, 0, &timeout);
   2882   1.7  riastrad 			if (ret)
   2883   1.7  riastrad 				return ret;
   2884   1.7  riastrad 
   2885   1.7  riastrad 			if (timeout == 0) {
   2886   1.7  riastrad 				index++;
   2887   1.7  riastrad 				continue;
   2888   1.7  riastrad 			} else if (timeout < 0) {
   2889   1.7  riastrad 				timeout = MAX_SCHEDULE_TIMEOUT;
   2890   1.7  riastrad 			} else {
   2891   1.7  riastrad 				timeout = msecs_to_jiffies(timeout);
   2892   1.7  riastrad 			}
   2893   1.7  riastrad 
   2894   1.7  riastrad 			switch (index++) {
   2895   1.7  riastrad 			case 0:
   2896   1.7  riastrad 				adev->gfx_timeout = timeout;
   2897   1.7  riastrad 				break;
   2898   1.7  riastrad 			case 1:
   2899   1.7  riastrad 				adev->compute_timeout = timeout;
   2900   1.7  riastrad 				break;
   2901   1.7  riastrad 			case 2:
   2902   1.7  riastrad 				adev->sdma_timeout = timeout;
   2903   1.7  riastrad 				break;
   2904   1.7  riastrad 			case 3:
   2905   1.7  riastrad 				adev->video_timeout = timeout;
   2906   1.7  riastrad 				break;
   2907   1.7  riastrad 			default:
   2908   1.7  riastrad 				break;
   2909   1.7  riastrad 			}
   2910   1.7  riastrad 		}
   2911   1.7  riastrad 		/*
   2912   1.7  riastrad 		 * There is only one value specified and
   2913   1.7  riastrad 		 * it should apply to all non-compute jobs.
   2914   1.7  riastrad 		 */
   2915   1.7  riastrad 		if (index == 1) {
   2916   1.7  riastrad 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
   2917   1.7  riastrad 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
   2918   1.7  riastrad 				adev->compute_timeout = adev->gfx_timeout;
   2919   1.7  riastrad 		}
   2920   1.7  riastrad 	}
   2921   1.7  riastrad 
   2922   1.7  riastrad 	return ret;
   2923   1.7  riastrad }
   2924   1.7  riastrad 
   2925   1.7  riastrad /**
   2926   1.7  riastrad  * amdgpu_device_init - initialize the driver
   2927   1.7  riastrad  *
   2928   1.7  riastrad  * @adev: amdgpu_device pointer
   2929   1.7  riastrad  * @ddev: drm dev pointer
   2930   1.7  riastrad  * @pdev: pci dev pointer
   2931   1.7  riastrad  * @flags: driver flags
   2932   1.7  riastrad  *
   2933   1.7  riastrad  * Initializes the driver info and hw (all asics).
   2934   1.7  riastrad  * Returns 0 for success or an error on failure.
   2935   1.7  riastrad  * Called at driver startup.
   2936   1.7  riastrad  */
   2937   1.7  riastrad int amdgpu_device_init(struct amdgpu_device *adev,
   2938   1.7  riastrad 		       struct drm_device *ddev,
   2939   1.7  riastrad 		       struct pci_dev *pdev,
   2940   1.7  riastrad 		       uint32_t flags)
   2941   1.7  riastrad {
   2942   1.7  riastrad 	int r, i;
   2943   1.7  riastrad 	bool boco = false;
   2944   1.7  riastrad 	u32 max_MBps;
   2945   1.1  riastrad 
   2946   1.1  riastrad 	adev->shutdown = false;
   2947   1.3  riastrad 	adev->dev = pci_dev_dev(pdev);
   2948   1.1  riastrad 	adev->ddev = ddev;
   2949   1.1  riastrad 	adev->pdev = pdev;
   2950   1.1  riastrad 	adev->flags = flags;
   2951   1.7  riastrad 
   2952   1.7  riastrad 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
   2953   1.7  riastrad 		adev->asic_type = amdgpu_force_asic_type;
   2954   1.7  riastrad 	else
   2955   1.7  riastrad 		adev->asic_type = flags & AMD_ASIC_MASK;
   2956   1.7  riastrad 
   2957   1.1  riastrad 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
   2958   1.7  riastrad 	if (amdgpu_emu_mode == 1)
   2959   1.7  riastrad 		adev->usec_timeout *= 2;
   2960   1.7  riastrad 	adev->gmc.gart_size = 512 * 1024 * 1024;
   2961   1.1  riastrad 	adev->accel_working = false;
   2962   1.1  riastrad 	adev->num_rings = 0;
   2963   1.1  riastrad 	adev->mman.buffer_funcs = NULL;
   2964   1.1  riastrad 	adev->mman.buffer_funcs_ring = NULL;
   2965   1.1  riastrad 	adev->vm_manager.vm_pte_funcs = NULL;
   2966   1.7  riastrad 	adev->vm_manager.vm_pte_num_scheds = 0;
   2967   1.7  riastrad 	adev->gmc.gmc_funcs = NULL;
   2968   1.7  riastrad 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
   2969   1.7  riastrad 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
   2970   1.1  riastrad 
   2971   1.1  riastrad 	adev->smc_rreg = &amdgpu_invalid_rreg;
   2972   1.1  riastrad 	adev->smc_wreg = &amdgpu_invalid_wreg;
   2973   1.1  riastrad 	adev->pcie_rreg = &amdgpu_invalid_rreg;
   2974   1.1  riastrad 	adev->pcie_wreg = &amdgpu_invalid_wreg;
   2975   1.7  riastrad 	adev->pciep_rreg = &amdgpu_invalid_rreg;
   2976   1.7  riastrad 	adev->pciep_wreg = &amdgpu_invalid_wreg;
   2977   1.7  riastrad 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
   2978   1.7  riastrad 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
   2979   1.1  riastrad 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
   2980   1.1  riastrad 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
   2981   1.1  riastrad 	adev->didt_rreg = &amdgpu_invalid_rreg;
   2982   1.1  riastrad 	adev->didt_wreg = &amdgpu_invalid_wreg;
   2983   1.7  riastrad 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
   2984   1.7  riastrad 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
   2985   1.1  riastrad 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
   2986   1.1  riastrad 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
   2987   1.1  riastrad 
   2988   1.1  riastrad 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
   2989   1.1  riastrad 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
   2990   1.1  riastrad 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
   2991   1.1  riastrad 
   2992   1.1  riastrad 	/* mutex initialization are all done here so we
   2993   1.1  riastrad 	 * can recall function without having locking issues */
   2994   1.3  riastrad 	atomic_set(&adev->irq.ih.lock, 0);
   2995   1.7  riastrad 	mutex_init(&adev->firmware.mutex);
   2996   1.1  riastrad 	mutex_init(&adev->pm.mutex);
   2997   1.1  riastrad 	mutex_init(&adev->gfx.gpu_clock_mutex);
   2998   1.1  riastrad 	mutex_init(&adev->srbm_mutex);
   2999   1.7  riastrad 	mutex_init(&adev->gfx.pipe_reserve_mutex);
   3000   1.7  riastrad 	mutex_init(&adev->gfx.gfx_off_mutex);
   3001   1.1  riastrad 	mutex_init(&adev->grbm_idx_mutex);
   3002   1.1  riastrad 	mutex_init(&adev->mn_lock);
   3003   1.7  riastrad 	mutex_init(&adev->virt.vf_errors.lock);
   3004   1.1  riastrad 	hash_init(adev->mn_hash);
   3005   1.7  riastrad 	mutex_init(&adev->lock_reset);
   3006   1.7  riastrad 	mutex_init(&adev->psp.mutex);
   3007   1.7  riastrad 	mutex_init(&adev->notifier_lock);
   3008   1.1  riastrad 
   3009   1.1  riastrad 	spin_lock_init(&adev->mmio_idx_lock);
   3010   1.1  riastrad 	spin_lock_init(&adev->smc_idx_lock);
   3011   1.1  riastrad 	spin_lock_init(&adev->pcie_idx_lock);
   3012   1.1  riastrad 	spin_lock_init(&adev->uvd_ctx_idx_lock);
   3013   1.1  riastrad 	spin_lock_init(&adev->didt_idx_lock);
   3014   1.7  riastrad 	spin_lock_init(&adev->gc_cac_idx_lock);
   3015   1.7  riastrad 	spin_lock_init(&adev->se_cac_idx_lock);
   3016   1.1  riastrad 	spin_lock_init(&adev->audio_endpt_idx_lock);
   3017   1.7  riastrad 	spin_lock_init(&adev->mm_stats.lock);
   3018   1.7  riastrad 
   3019   1.7  riastrad 	INIT_LIST_HEAD(&adev->shadow_list);
   3020   1.7  riastrad 	mutex_init(&adev->shadow_list_lock);
   3021   1.7  riastrad 
   3022   1.7  riastrad 	INIT_LIST_HEAD(&adev->ring_lru_list);
   3023   1.7  riastrad 	spin_lock_init(&adev->ring_lru_list_lock);
   3024   1.7  riastrad 
   3025   1.7  riastrad 	INIT_DELAYED_WORK(&adev->delayed_init_work,
   3026   1.7  riastrad 			  amdgpu_device_delayed_init_work_handler);
   3027   1.7  riastrad 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
   3028   1.7  riastrad 			  amdgpu_device_delay_enable_gfx_off);
   3029   1.7  riastrad 
   3030   1.7  riastrad 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
   3031   1.7  riastrad 
   3032  1.11  riastrad 	r = amdgpu_device_check_arguments(adev);
   3033  1.11  riastrad 	if (r)
   3034  1.11  riastrad 		return r;
   3035  1.11  riastrad 
   3036   1.7  riastrad 	adev->gfx.gfx_off_req_count = 1;
   3037   1.7  riastrad 	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
   3038   1.7  riastrad 
   3039   1.7  riastrad 	/* Registers mapping */
   3040   1.7  riastrad 	/* TODO: block userspace mapping of io register */
   3041   1.7  riastrad 	if (adev->asic_type >= CHIP_BONAIRE) {
   3042   1.7  riastrad 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
   3043   1.7  riastrad 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
   3044   1.7  riastrad 	} else {
   3045   1.7  riastrad 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
   3046   1.7  riastrad 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
   3047   1.7  riastrad 	}
   3048   1.1  riastrad 
   3049   1.3  riastrad #ifdef __NetBSD__
   3050  1.21  riastrad 	const int bar = (adev->asic_type >= CHIP_BONAIRE ? 5 : 2);
   3051  1.21  riastrad 	if (pci_mapreg_map(&adev->pdev->pd_pa, PCI_BAR(bar),
   3052   1.3  riastrad 		pci_mapreg_type(adev->pdev->pd_pa.pa_pc,
   3053  1.21  riastrad 		    adev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
   3054   1.3  riastrad 		0,
   3055   1.3  riastrad 		&adev->rmmiot, &adev->rmmioh,
   3056   1.3  riastrad 		&adev->rmmio_base, &adev->rmmio_size))
   3057   1.3  riastrad 		return -EIO;
   3058   1.3  riastrad 	DRM_INFO("register mmio base: 0x%8"PRIXMAX"\n",
   3059   1.3  riastrad 	    (uintmax_t)adev->rmmio_base);
   3060   1.3  riastrad 	DRM_INFO("register mmio size: %"PRIuMAX"\n",
   3061   1.3  riastrad 	    (uintmax_t)adev->rmmio_size);
   3062   1.3  riastrad #else
   3063   1.1  riastrad 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
   3064   1.1  riastrad 	if (adev->rmmio == NULL) {
   3065   1.1  riastrad 		return -ENOMEM;
   3066   1.1  riastrad 	}
   3067   1.1  riastrad 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
   3068   1.1  riastrad 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
   3069   1.3  riastrad #endif
   3070   1.1  riastrad 
   3071   1.1  riastrad 	/* io port mapping */
   3072   1.1  riastrad 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
   3073   1.3  riastrad #ifdef __NetBSD__
   3074   1.3  riastrad 		if (pci_mapreg_map(&adev->pdev->pd_pa, PCI_BAR(i),
   3075   1.3  riastrad 			PCI_MAPREG_TYPE_IO, 0,
   3076   1.3  riastrad 			&adev->rio_memt, &adev->rio_memh,
   3077   1.3  riastrad 			NULL, &adev->rio_mem_size) == 0)
   3078   1.3  riastrad 			break;
   3079   1.3  riastrad #else
   3080   1.1  riastrad 		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
   3081   1.1  riastrad 			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
   3082   1.1  riastrad 			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
   3083   1.1  riastrad 			break;
   3084   1.1  riastrad 		}
   3085   1.3  riastrad #endif
   3086   1.1  riastrad 	}
   3087   1.3  riastrad #ifdef __NetBSD__
   3088   1.3  riastrad 	if (i == DEVICE_COUNT_RESOURCE)
   3089   1.3  riastrad #else
   3090   1.1  riastrad 	if (adev->rio_mem == NULL)
   3091   1.3  riastrad #endif
   3092   1.7  riastrad 		DRM_INFO("PCI I/O BAR is not found.\n");
   3093   1.7  riastrad 
   3094   1.7  riastrad 	/* enable PCIE atomic ops */
   3095   1.7  riastrad 	r = pci_enable_atomic_ops_to_root(adev->pdev,
   3096   1.7  riastrad 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
   3097   1.7  riastrad 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
   3098   1.7  riastrad 	if (r) {
   3099   1.7  riastrad 		adev->have_atomics_support = false;
   3100   1.7  riastrad 		DRM_INFO("PCIE atomic ops is not supported\n");
   3101   1.7  riastrad 	} else {
   3102   1.7  riastrad 		adev->have_atomics_support = true;
   3103   1.7  riastrad 	}
   3104   1.7  riastrad 
   3105   1.7  riastrad 	amdgpu_device_get_pcie_info(adev);
   3106   1.7  riastrad 
   3107   1.7  riastrad 	if (amdgpu_mcbp)
   3108   1.7  riastrad 		DRM_INFO("MCBP is enabled\n");
   3109   1.7  riastrad 
   3110   1.7  riastrad 	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
   3111   1.7  riastrad 		adev->enable_mes = true;
   3112   1.7  riastrad 
   3113   1.7  riastrad 	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
   3114   1.7  riastrad 		r = amdgpu_discovery_init(adev);
   3115   1.7  riastrad 		if (r) {
   3116   1.7  riastrad 			dev_err(adev->dev, "amdgpu_discovery_init failed\n");
   3117   1.7  riastrad 			return r;
   3118   1.7  riastrad 		}
   3119   1.7  riastrad 	}
   3120   1.1  riastrad 
   3121   1.1  riastrad 	/* early init functions */
   3122   1.7  riastrad 	r = amdgpu_device_ip_early_init(adev);
   3123   1.1  riastrad 	if (r)
   3124   1.1  riastrad 		return r;
   3125   1.1  riastrad 
   3126   1.7  riastrad 	r = amdgpu_device_get_job_timeout_settings(adev);
   3127   1.7  riastrad 	if (r) {
   3128   1.7  riastrad 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
   3129   1.7  riastrad 		return r;
   3130   1.7  riastrad 	}
   3131   1.7  riastrad 
   3132   1.7  riastrad 	/* doorbell bar mapping and doorbell index init*/
   3133   1.7  riastrad 	amdgpu_device_doorbell_init(adev);
   3134   1.7  riastrad 
   3135   1.3  riastrad #ifndef __NetBSD__		/* XXX amdgpu vga */
   3136   1.1  riastrad 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
   3137   1.1  riastrad 	/* this will fail for cards that aren't VGA class devices, just
   3138   1.1  riastrad 	 * ignore it */
   3139   1.7  riastrad 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
   3140   1.1  riastrad 
   3141   1.7  riastrad 	if (amdgpu_device_supports_boco(ddev))
   3142   1.7  riastrad 		boco = true;
   3143   1.7  riastrad 	if (amdgpu_has_atpx() &&
   3144   1.7  riastrad 	    (amdgpu_is_atpx_hybrid() ||
   3145   1.7  riastrad 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
   3146   1.7  riastrad 	    !pci_is_thunderbolt_attached(adev->pdev))
   3147   1.7  riastrad 		vga_switcheroo_register_client(adev->pdev,
   3148   1.7  riastrad 					       &amdgpu_switcheroo_ops, boco);
   3149   1.7  riastrad 	if (boco)
   3150   1.1  riastrad 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
   3151   1.3  riastrad #endif
   3152   1.1  riastrad 
   3153   1.7  riastrad 	if (amdgpu_emu_mode == 1) {
   3154   1.7  riastrad 		/* post the asic on emulation mode */
   3155   1.7  riastrad 		emu_soc_asic_init(adev);
   3156   1.7  riastrad 		goto fence_driver_init;
   3157   1.7  riastrad 	}
   3158   1.7  riastrad 
   3159   1.7  riastrad 	/* detect if we are with an SRIOV vbios */
   3160   1.7  riastrad 	amdgpu_device_detect_sriov_bios(adev);
   3161   1.7  riastrad 
   3162   1.7  riastrad 	/* check if we need to reset the asic
   3163   1.7  riastrad 	 *  E.g., driver was not cleanly unloaded previously, etc.
   3164   1.7  riastrad 	 */
   3165   1.7  riastrad 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
   3166   1.7  riastrad 		r = amdgpu_asic_reset(adev);
   3167   1.7  riastrad 		if (r) {
   3168   1.7  riastrad 			dev_err(adev->dev, "asic reset on init failed\n");
   3169   1.7  riastrad 			goto failed;
   3170   1.7  riastrad 		}
   3171   1.1  riastrad 	}
   3172   1.1  riastrad 
   3173   1.1  riastrad 	/* Post card if necessary */
   3174   1.7  riastrad 	if (amdgpu_device_need_post(adev)) {
   3175   1.1  riastrad 		if (!adev->bios) {
   3176   1.7  riastrad 			dev_err(adev->dev, "no vBIOS found\n");
   3177   1.7  riastrad 			r = -EINVAL;
   3178   1.7  riastrad 			goto failed;
   3179   1.7  riastrad 		}
   3180   1.7  riastrad 		DRM_INFO("GPU posting now...\n");
   3181   1.7  riastrad 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
   3182   1.7  riastrad 		if (r) {
   3183   1.7  riastrad 			dev_err(adev->dev, "gpu post error!\n");
   3184   1.7  riastrad 			goto failed;
   3185   1.1  riastrad 		}
   3186   1.1  riastrad 	}
   3187   1.1  riastrad 
   3188   1.7  riastrad 	if (adev->is_atom_fw) {
   3189   1.7  riastrad 		/* Initialize clocks */
   3190   1.7  riastrad 		r = amdgpu_atomfirmware_get_clock_info(adev);
   3191   1.7  riastrad 		if (r) {
   3192   1.7  riastrad 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
   3193   1.7  riastrad 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
   3194   1.7  riastrad 			goto failed;
   3195   1.7  riastrad 		}
   3196   1.7  riastrad 	} else {
   3197   1.7  riastrad 		/* Initialize clocks */
   3198   1.7  riastrad 		r = amdgpu_atombios_get_clock_info(adev);
   3199   1.7  riastrad 		if (r) {
   3200   1.7  riastrad 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
   3201   1.7  riastrad 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
   3202   1.7  riastrad 			goto failed;
   3203   1.7  riastrad 		}
   3204   1.7  riastrad 		/* init i2c buses */
   3205   1.7  riastrad 		if (!amdgpu_device_has_dc_support(adev))
   3206   1.7  riastrad 			amdgpu_atombios_i2c_init(adev);
   3207   1.7  riastrad 	}
   3208   1.1  riastrad 
   3209   1.7  riastrad fence_driver_init:
   3210   1.1  riastrad 	/* Fence driver */
   3211   1.1  riastrad 	r = amdgpu_fence_driver_init(adev);
   3212   1.7  riastrad 	if (r) {
   3213   1.7  riastrad 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
   3214   1.7  riastrad 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
   3215   1.7  riastrad 		goto failed;
   3216   1.7  riastrad 	}
   3217   1.1  riastrad 
   3218   1.1  riastrad 	/* init the mode config */
   3219   1.1  riastrad 	drm_mode_config_init(adev->ddev);
   3220   1.1  riastrad 
   3221   1.7  riastrad 	r = amdgpu_device_ip_init(adev);
   3222   1.1  riastrad 	if (r) {
   3223   1.7  riastrad 		/* failed in exclusive mode due to timeout */
   3224   1.7  riastrad 		if (amdgpu_sriov_vf(adev) &&
   3225   1.7  riastrad 		    !amdgpu_sriov_runtime(adev) &&
   3226   1.7  riastrad 		    amdgpu_virt_mmio_blocked(adev) &&
   3227   1.7  riastrad 		    !amdgpu_virt_wait_reset(adev)) {
   3228   1.7  riastrad 			dev_err(adev->dev, "VF exclusive mode timeout\n");
   3229   1.7  riastrad 			/* Don't send request since VF is inactive. */
   3230   1.7  riastrad 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
   3231   1.7  riastrad 			adev->virt.ops = NULL;
   3232   1.7  riastrad 			r = -EAGAIN;
   3233   1.7  riastrad 			goto failed;
   3234   1.7  riastrad 		}
   3235   1.7  riastrad 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
   3236   1.7  riastrad 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
   3237   1.7  riastrad 		goto failed;
   3238   1.1  riastrad 	}
   3239   1.1  riastrad 
   3240   1.7  riastrad 	DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
   3241   1.7  riastrad 			adev->gfx.config.max_shader_engines,
   3242   1.7  riastrad 			adev->gfx.config.max_sh_per_se,
   3243   1.7  riastrad 			adev->gfx.config.max_cu_per_sh,
   3244   1.7  riastrad 			adev->gfx.cu_info.number);
   3245   1.7  riastrad 
   3246   1.7  riastrad 	amdgpu_ctx_init_sched(adev);
   3247   1.7  riastrad 
   3248   1.1  riastrad 	adev->accel_working = true;
   3249   1.1  riastrad 
   3250   1.7  riastrad 	amdgpu_vm_check_compute_bug(adev);
   3251   1.7  riastrad 
   3252   1.7  riastrad 	/* Initialize the buffer migration limit. */
   3253   1.7  riastrad 	if (amdgpu_moverate >= 0)
   3254   1.7  riastrad 		max_MBps = amdgpu_moverate;
   3255   1.7  riastrad 	else
   3256   1.7  riastrad 		max_MBps = 8; /* Allow 8 MB/s. */
   3257   1.7  riastrad 	/* Get a log2 for easy divisions. */
   3258   1.7  riastrad 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
   3259   1.7  riastrad 
   3260   1.1  riastrad 	amdgpu_fbdev_init(adev);
   3261   1.1  riastrad 
   3262   1.7  riastrad 	r = amdgpu_pm_sysfs_init(adev);
   3263   1.1  riastrad 	if (r) {
   3264   1.7  riastrad 		adev->pm_sysfs_en = false;
   3265   1.7  riastrad 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
   3266   1.7  riastrad 	} else
   3267   1.7  riastrad 		adev->pm_sysfs_en = true;
   3268   1.1  riastrad 
   3269   1.7  riastrad 	r = amdgpu_ucode_sysfs_init(adev);
   3270   1.1  riastrad 	if (r) {
   3271   1.7  riastrad 		adev->ucode_sysfs_en = false;
   3272   1.7  riastrad 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
   3273   1.7  riastrad 	} else
   3274   1.7  riastrad 		adev->ucode_sysfs_en = true;
   3275   1.7  riastrad 
   3276   1.7  riastrad 	r = amdgpu_debugfs_gem_init(adev);
   3277   1.1  riastrad 	if (r)
   3278   1.1  riastrad 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
   3279   1.1  riastrad 
   3280   1.1  riastrad 	r = amdgpu_debugfs_regs_init(adev);
   3281   1.7  riastrad 	if (r)
   3282   1.1  riastrad 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
   3283   1.7  riastrad 
   3284   1.7  riastrad 	r = amdgpu_debugfs_firmware_init(adev);
   3285   1.7  riastrad 	if (r)
   3286   1.7  riastrad 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
   3287   1.7  riastrad 
   3288   1.7  riastrad 	r = amdgpu_debugfs_init(adev);
   3289   1.7  riastrad 	if (r)
   3290   1.7  riastrad 		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
   3291   1.1  riastrad 
   3292   1.1  riastrad 	if ((amdgpu_testing & 1)) {
   3293   1.1  riastrad 		if (adev->accel_working)
   3294   1.1  riastrad 			amdgpu_test_moves(adev);
   3295   1.1  riastrad 		else
   3296   1.1  riastrad 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
   3297   1.1  riastrad 	}
   3298   1.7  riastrad 	if (amdgpu_benchmarking) {
   3299   1.1  riastrad 		if (adev->accel_working)
   3300   1.7  riastrad 			amdgpu_benchmark(adev, amdgpu_benchmarking);
   3301   1.1  riastrad 		else
   3302   1.7  riastrad 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
   3303   1.7  riastrad 	}
   3304   1.7  riastrad 
   3305   1.7  riastrad 	/*
   3306   1.7  riastrad 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
   3307   1.7  riastrad 	 * Otherwise the mgpu fan boost feature will be skipped due to the
   3308   1.7  riastrad 	 * gpu instance is counted less.
   3309   1.7  riastrad 	 */
   3310   1.7  riastrad 	amdgpu_register_gpu_instance(adev);
   3311   1.7  riastrad 
   3312   1.7  riastrad 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
   3313   1.7  riastrad 	 * explicit gating rather than handling it automatically.
   3314   1.7  riastrad 	 */
   3315   1.7  riastrad 	r = amdgpu_device_ip_late_init(adev);
   3316   1.7  riastrad 	if (r) {
   3317   1.7  riastrad 		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
   3318   1.7  riastrad 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
   3319   1.7  riastrad 		goto failed;
   3320   1.1  riastrad 	}
   3321   1.7  riastrad 
   3322   1.7  riastrad 	/* must succeed. */
   3323   1.7  riastrad 	amdgpu_ras_resume(adev);
   3324   1.7  riastrad 
   3325   1.7  riastrad 	queue_delayed_work(system_wq, &adev->delayed_init_work,
   3326   1.7  riastrad 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
   3327   1.7  riastrad 
   3328   1.8  riastrad #ifndef __NetBSD__		/* XXX amdgpu sysfs */
   3329   1.7  riastrad 	r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
   3330   1.7  riastrad 	if (r) {
   3331   1.7  riastrad 		dev_err(adev->dev, "Could not create pcie_replay_count");
   3332   1.7  riastrad 		return r;
   3333   1.1  riastrad 	}
   3334   1.8  riastrad #endif
   3335   1.1  riastrad 
   3336   1.7  riastrad 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
   3337   1.7  riastrad 		r = amdgpu_pmu_init(adev);
   3338   1.1  riastrad 	if (r)
   3339   1.7  riastrad 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
   3340   1.1  riastrad 
   3341   1.1  riastrad 	return 0;
   3342   1.7  riastrad 
   3343   1.7  riastrad failed:
   3344   1.7  riastrad 	amdgpu_vf_error_trans_all(adev);
   3345   1.7  riastrad 	if (boco)
   3346   1.7  riastrad 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
   3347   1.7  riastrad 
   3348   1.7  riastrad 	return r;
   3349   1.1  riastrad }
   3350   1.1  riastrad 
   3351   1.1  riastrad /**
   3352   1.1  riastrad  * amdgpu_device_fini - tear down the driver
   3353   1.1  riastrad  *
   3354   1.1  riastrad  * @adev: amdgpu_device pointer
   3355   1.1  riastrad  *
   3356   1.1  riastrad  * Tear down the driver info (all asics).
   3357   1.1  riastrad  * Called at driver shutdown.
   3358   1.1  riastrad  */
   3359   1.1  riastrad void amdgpu_device_fini(struct amdgpu_device *adev)
   3360   1.1  riastrad {
   3361   1.3  riastrad 	int r __unused;
   3362   1.1  riastrad 
   3363   1.1  riastrad 	DRM_INFO("amdgpu: finishing device.\n");
   3364   1.7  riastrad 	flush_delayed_work(&adev->delayed_init_work);
   3365   1.1  riastrad 	adev->shutdown = true;
   3366   1.7  riastrad 
   3367   1.7  riastrad 	/* disable all interrupts */
   3368   1.7  riastrad 	amdgpu_irq_disable_all(adev);
   3369   1.7  riastrad 	if (adev->mode_info.mode_config_initialized){
   3370   1.7  riastrad 		if (!amdgpu_device_has_dc_support(adev))
   3371   1.7  riastrad 			drm_helper_force_disable_all(adev->ddev);
   3372   1.7  riastrad 		else
   3373   1.7  riastrad 			drm_atomic_helper_shutdown(adev->ddev);
   3374   1.7  riastrad 	}
   3375   1.1  riastrad 	amdgpu_fence_driver_fini(adev);
   3376   1.7  riastrad 	if (adev->pm_sysfs_en)
   3377   1.7  riastrad 		amdgpu_pm_sysfs_fini(adev);
   3378   1.1  riastrad 	amdgpu_fbdev_fini(adev);
   3379   1.7  riastrad 	r = amdgpu_device_ip_fini(adev);
   3380   1.7  riastrad 	if (adev->firmware.gpu_info_fw) {
   3381   1.7  riastrad 		release_firmware(adev->firmware.gpu_info_fw);
   3382   1.7  riastrad 		adev->firmware.gpu_info_fw = NULL;
   3383   1.7  riastrad 	}
   3384   1.1  riastrad 	adev->accel_working = false;
   3385   1.1  riastrad 	/* free i2c buses */
   3386   1.7  riastrad 	if (!amdgpu_device_has_dc_support(adev))
   3387   1.7  riastrad 		amdgpu_i2c_fini(adev);
   3388   1.7  riastrad 
   3389   1.7  riastrad 	if (amdgpu_emu_mode != 1)
   3390   1.7  riastrad 		amdgpu_atombios_fini(adev);
   3391   1.7  riastrad 
   3392   1.1  riastrad 	kfree(adev->bios);
   3393   1.1  riastrad 	adev->bios = NULL;
   3394   1.3  riastrad #ifndef __NetBSD__		/* XXX amdgpu vga */
   3395   1.7  riastrad 	if (amdgpu_has_atpx() &&
   3396   1.7  riastrad 	    (amdgpu_is_atpx_hybrid() ||
   3397   1.7  riastrad 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
   3398   1.7  riastrad 	    !pci_is_thunderbolt_attached(adev->pdev))
   3399   1.7  riastrad 		vga_switcheroo_unregister_client(adev->pdev);
   3400   1.7  riastrad 	if (amdgpu_device_supports_boco(adev->ddev))
   3401   1.7  riastrad 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
   3402   1.1  riastrad 	vga_client_register(adev->pdev, NULL, NULL, NULL);
   3403   1.3  riastrad #endif
   3404   1.3  riastrad #ifdef __NetBSD__
   3405   1.3  riastrad 	if (adev->rio_mem_size)
   3406   1.3  riastrad 		bus_space_unmap(adev->rio_memt, adev->rio_memh,
   3407   1.3  riastrad 		    adev->rio_mem_size);
   3408   1.3  riastrad 	adev->rio_mem_size = 0;
   3409   1.3  riastrad 	bus_space_unmap(adev->rmmiot, adev->rmmioh, adev->rmmio_size);
   3410   1.3  riastrad #else
   3411   1.1  riastrad 	if (adev->rio_mem)
   3412   1.1  riastrad 		pci_iounmap(adev->pdev, adev->rio_mem);
   3413   1.1  riastrad 	adev->rio_mem = NULL;
   3414   1.1  riastrad 	iounmap(adev->rmmio);
   3415   1.1  riastrad 	adev->rmmio = NULL;
   3416   1.3  riastrad #endif
   3417   1.7  riastrad 	amdgpu_device_doorbell_fini(adev);
   3418   1.7  riastrad 
   3419   1.1  riastrad 	amdgpu_debugfs_regs_cleanup(adev);
   3420  1.10  riastrad #ifndef __NetBSD__		/* XXX amdgpu sysfs */
   3421   1.7  riastrad 	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
   3422  1.10  riastrad #endif
   3423   1.7  riastrad 	if (adev->ucode_sysfs_en)
   3424   1.7  riastrad 		amdgpu_ucode_sysfs_fini(adev);
   3425   1.7  riastrad 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
   3426   1.7  riastrad 		amdgpu_pmu_fini(adev);
   3427   1.7  riastrad 	amdgpu_debugfs_preempt_cleanup(adev);
   3428   1.7  riastrad 	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
   3429   1.7  riastrad 		amdgpu_discovery_fini(adev);
   3430   1.7  riastrad 	spin_lock_destroy(&adev->ring_lru_list_lock);
   3431   1.7  riastrad 	mutex_destroy(&adev->shadow_list_lock);
   3432   1.7  riastrad 	spin_lock_destroy(&adev->mm_stats.lock);
   3433   1.3  riastrad 	spin_lock_destroy(&adev->audio_endpt_idx_lock);
   3434   1.7  riastrad 	spin_lock_destroy(&adev->se_cac_idx_lock);
   3435   1.7  riastrad 	spin_lock_destroy(&adev->gc_cac_idx_lock);
   3436   1.3  riastrad 	spin_lock_destroy(&adev->didt_idx_lock);
   3437   1.3  riastrad 	spin_lock_destroy(&adev->uvd_ctx_idx_lock);
   3438   1.3  riastrad 	spin_lock_destroy(&adev->pcie_idx_lock);
   3439   1.3  riastrad 	spin_lock_destroy(&adev->smc_idx_lock);
   3440   1.3  riastrad 	spin_lock_destroy(&adev->mmio_idx_lock);
   3441   1.7  riastrad 	mutex_destroy(&adev->notifier_lock);
   3442   1.7  riastrad 	mutex_destroy(&adev->psp.mutex);
   3443   1.7  riastrad 	mutex_destroy(&adev->lock_reset);
   3444   1.7  riastrad 	/* hash_destroy(adev->mn_hash)? */
   3445   1.7  riastrad 	mutex_destroy(&adev->virt.vf_errors.lock);
   3446   1.3  riastrad 	mutex_destroy(&adev->mn_lock);
   3447   1.3  riastrad 	mutex_destroy(&adev->grbm_idx_mutex);
   3448   1.7  riastrad 	mutex_destroy(&adev->gfx.gfx_off_mutex);
   3449   1.7  riastrad 	mutex_destroy(&adev->gfx.pipe_reserve_mutex);
   3450   1.3  riastrad 	mutex_destroy(&adev->srbm_mutex);
   3451   1.3  riastrad 	mutex_destroy(&adev->gfx.gpu_clock_mutex);
   3452   1.3  riastrad 	mutex_destroy(&adev->pm.mutex);
   3453   1.7  riastrad 	mutex_destroy(&adev->firmware.mutex);
   3454   1.1  riastrad }
   3455   1.1  riastrad 
   3456   1.1  riastrad 
   3457   1.1  riastrad /*
   3458   1.1  riastrad  * Suspend & resume.
   3459   1.7  riastrad  */
   3460   1.7  riastrad /**
   3461   1.7  riastrad  * amdgpu_device_suspend - initiate device suspend
   3462   1.7  riastrad  *
   3463   1.7  riastrad  * @dev: drm dev pointer
   3464   1.7  riastrad  * @suspend: suspend state
   3465   1.7  riastrad  * @fbcon : notify the fbdev of suspend
   3466   1.7  riastrad  *
   3467   1.7  riastrad  * Puts the hw in the suspend state (all asics).
   3468   1.7  riastrad  * Returns 0 for success or an error on failure.
   3469   1.7  riastrad  * Called at driver suspend.
   3470   1.7  riastrad  */
   3471   1.7  riastrad int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
   3472   1.7  riastrad {
   3473   1.7  riastrad 	struct amdgpu_device *adev;
   3474   1.7  riastrad 	struct drm_crtc *crtc;
   3475   1.7  riastrad 	struct drm_connector *connector;
   3476   1.7  riastrad 	struct drm_connector_list_iter iter;
   3477   1.7  riastrad 	int r;
   3478   1.7  riastrad 
   3479   1.7  riastrad 	if (dev == NULL || dev->dev_private == NULL) {
   3480   1.7  riastrad 		return -ENODEV;
   3481   1.7  riastrad 	}
   3482   1.7  riastrad 
   3483   1.7  riastrad 	adev = dev->dev_private;
   3484   1.7  riastrad 
   3485   1.7  riastrad 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   3486   1.7  riastrad 		return 0;
   3487   1.7  riastrad 
   3488   1.7  riastrad 	adev->in_suspend = true;
   3489   1.7  riastrad 	drm_kms_helper_poll_disable(dev);
   3490   1.7  riastrad 
   3491   1.7  riastrad 	if (fbcon)
   3492   1.7  riastrad 		amdgpu_fbdev_set_suspend(adev, 1);
   3493   1.7  riastrad 
   3494   1.7  riastrad 	cancel_delayed_work_sync(&adev->delayed_init_work);
   3495   1.7  riastrad 
   3496   1.7  riastrad 	if (!amdgpu_device_has_dc_support(adev)) {
   3497   1.7  riastrad 		/* turn off display hw */
   3498   1.7  riastrad 		drm_modeset_lock_all(dev);
   3499   1.7  riastrad 		drm_connector_list_iter_begin(dev, &iter);
   3500   1.7  riastrad 		drm_for_each_connector_iter(connector, &iter)
   3501   1.7  riastrad 			drm_helper_connector_dpms(connector,
   3502   1.7  riastrad 						  DRM_MODE_DPMS_OFF);
   3503   1.7  riastrad 		drm_connector_list_iter_end(&iter);
   3504   1.7  riastrad 		drm_modeset_unlock_all(dev);
   3505   1.7  riastrad 			/* unpin the front buffers and cursors */
   3506   1.7  riastrad 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   3507   1.7  riastrad 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
   3508   1.7  riastrad 			struct drm_framebuffer *fb = crtc->primary->fb;
   3509   1.7  riastrad 			struct amdgpu_bo *robj;
   3510   1.7  riastrad 
   3511   1.7  riastrad 			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
   3512   1.7  riastrad 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
   3513   1.7  riastrad 				r = amdgpu_bo_reserve(aobj, true);
   3514   1.7  riastrad 				if (r == 0) {
   3515   1.7  riastrad 					amdgpu_bo_unpin(aobj);
   3516   1.7  riastrad 					amdgpu_bo_unreserve(aobj);
   3517   1.7  riastrad 				}
   3518   1.7  riastrad 			}
   3519   1.7  riastrad 
   3520   1.7  riastrad 			if (fb == NULL || fb->obj[0] == NULL) {
   3521   1.7  riastrad 				continue;
   3522   1.7  riastrad 			}
   3523   1.7  riastrad 			robj = gem_to_amdgpu_bo(fb->obj[0]);
   3524   1.7  riastrad 			/* don't unpin kernel fb objects */
   3525   1.7  riastrad 			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
   3526   1.7  riastrad 				r = amdgpu_bo_reserve(robj, true);
   3527   1.7  riastrad 				if (r == 0) {
   3528   1.7  riastrad 					amdgpu_bo_unpin(robj);
   3529   1.7  riastrad 					amdgpu_bo_unreserve(robj);
   3530   1.7  riastrad 				}
   3531   1.7  riastrad 			}
   3532   1.7  riastrad 		}
   3533   1.7  riastrad 	}
   3534   1.7  riastrad 
   3535   1.7  riastrad 	amdgpu_amdkfd_suspend(adev);
   3536   1.7  riastrad 
   3537   1.7  riastrad 	amdgpu_ras_suspend(adev);
   3538   1.7  riastrad 
   3539   1.7  riastrad 	r = amdgpu_device_ip_suspend_phase1(adev);
   3540   1.7  riastrad 
   3541   1.7  riastrad 	/* evict vram memory */
   3542   1.7  riastrad 	amdgpu_bo_evict_vram(adev);
   3543   1.7  riastrad 
   3544   1.7  riastrad 	amdgpu_fence_driver_suspend(adev);
   3545   1.7  riastrad 
   3546   1.7  riastrad 	r = amdgpu_device_ip_suspend_phase2(adev);
   3547   1.7  riastrad 
   3548   1.7  riastrad 	/* evict remaining vram memory
   3549   1.7  riastrad 	 * This second call to evict vram is to evict the gart page table
   3550   1.7  riastrad 	 * using the CPU.
   3551   1.7  riastrad 	 */
   3552   1.7  riastrad 	amdgpu_bo_evict_vram(adev);
   3553   1.7  riastrad 
   3554   1.7  riastrad 	return 0;
   3555   1.7  riastrad }
   3556   1.7  riastrad 
   3557   1.7  riastrad /**
   3558   1.7  riastrad  * amdgpu_device_resume - initiate device resume
   3559   1.7  riastrad  *
   3560   1.7  riastrad  * @dev: drm dev pointer
   3561   1.7  riastrad  * @resume: resume state
   3562   1.7  riastrad  * @fbcon : notify the fbdev of resume
   3563   1.7  riastrad  *
   3564   1.7  riastrad  * Bring the hw back to operating state (all asics).
   3565   1.7  riastrad  * Returns 0 for success or an error on failure.
   3566   1.7  riastrad  * Called at driver resume.
   3567   1.7  riastrad  */
   3568   1.7  riastrad int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
   3569   1.7  riastrad {
   3570   1.7  riastrad 	struct drm_connector *connector;
   3571   1.7  riastrad 	struct drm_connector_list_iter iter;
   3572   1.7  riastrad 	struct amdgpu_device *adev = dev->dev_private;
   3573   1.7  riastrad 	struct drm_crtc *crtc;
   3574   1.7  riastrad 	int r = 0;
   3575   1.7  riastrad 
   3576   1.7  riastrad 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   3577   1.7  riastrad 		return 0;
   3578   1.7  riastrad 
   3579   1.7  riastrad 	/* post card */
   3580   1.7  riastrad 	if (amdgpu_device_need_post(adev)) {
   3581   1.7  riastrad 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
   3582   1.7  riastrad 		if (r)
   3583   1.7  riastrad 			DRM_ERROR("amdgpu asic init failed\n");
   3584   1.7  riastrad 	}
   3585   1.7  riastrad 
   3586   1.7  riastrad 	r = amdgpu_device_ip_resume(adev);
   3587   1.7  riastrad 	if (r) {
   3588   1.7  riastrad 		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
   3589   1.7  riastrad 		return r;
   3590   1.7  riastrad 	}
   3591   1.7  riastrad 	amdgpu_fence_driver_resume(adev);
   3592   1.7  riastrad 
   3593   1.7  riastrad 
   3594   1.7  riastrad 	r = amdgpu_device_ip_late_init(adev);
   3595   1.7  riastrad 	if (r)
   3596   1.7  riastrad 		return r;
   3597   1.7  riastrad 
   3598   1.7  riastrad 	queue_delayed_work(system_wq, &adev->delayed_init_work,
   3599   1.7  riastrad 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
   3600   1.7  riastrad 
   3601   1.7  riastrad 	if (!amdgpu_device_has_dc_support(adev)) {
   3602   1.7  riastrad 		/* pin cursors */
   3603   1.7  riastrad 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   3604   1.7  riastrad 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
   3605   1.7  riastrad 
   3606   1.7  riastrad 			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
   3607   1.7  riastrad 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
   3608   1.7  riastrad 				r = amdgpu_bo_reserve(aobj, true);
   3609   1.7  riastrad 				if (r == 0) {
   3610   1.7  riastrad 					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
   3611   1.7  riastrad 					if (r != 0)
   3612   1.7  riastrad 						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
   3613   1.7  riastrad 					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
   3614   1.7  riastrad 					amdgpu_bo_unreserve(aobj);
   3615   1.7  riastrad 				}
   3616   1.7  riastrad 			}
   3617   1.7  riastrad 		}
   3618   1.7  riastrad 	}
   3619   1.7  riastrad 	r = amdgpu_amdkfd_resume(adev);
   3620   1.7  riastrad 	if (r)
   3621   1.7  riastrad 		return r;
   3622   1.7  riastrad 
   3623   1.7  riastrad 	/* Make sure IB tests flushed */
   3624   1.7  riastrad 	flush_delayed_work(&adev->delayed_init_work);
   3625   1.7  riastrad 
   3626   1.7  riastrad 	/* blat the mode back in */
   3627   1.7  riastrad 	if (fbcon) {
   3628   1.7  riastrad 		if (!amdgpu_device_has_dc_support(adev)) {
   3629   1.7  riastrad 			/* pre DCE11 */
   3630   1.7  riastrad 			drm_helper_resume_force_mode(dev);
   3631   1.7  riastrad 
   3632   1.7  riastrad 			/* turn on display hw */
   3633   1.7  riastrad 			drm_modeset_lock_all(dev);
   3634   1.7  riastrad 
   3635   1.7  riastrad 			drm_connector_list_iter_begin(dev, &iter);
   3636   1.7  riastrad 			drm_for_each_connector_iter(connector, &iter)
   3637   1.7  riastrad 				drm_helper_connector_dpms(connector,
   3638   1.7  riastrad 							  DRM_MODE_DPMS_ON);
   3639   1.7  riastrad 			drm_connector_list_iter_end(&iter);
   3640   1.7  riastrad 
   3641   1.7  riastrad 			drm_modeset_unlock_all(dev);
   3642   1.7  riastrad 		}
   3643   1.7  riastrad 		amdgpu_fbdev_set_suspend(adev, 0);
   3644   1.7  riastrad 	}
   3645   1.7  riastrad 
   3646   1.7  riastrad 	drm_kms_helper_poll_enable(dev);
   3647   1.7  riastrad 
   3648   1.7  riastrad 	amdgpu_ras_resume(adev);
   3649   1.7  riastrad 
   3650   1.7  riastrad 	/*
   3651   1.7  riastrad 	 * Most of the connector probing functions try to acquire runtime pm
   3652   1.7  riastrad 	 * refs to ensure that the GPU is powered on when connector polling is
   3653   1.7  riastrad 	 * performed. Since we're calling this from a runtime PM callback,
   3654   1.7  riastrad 	 * trying to acquire rpm refs will cause us to deadlock.
   3655   1.7  riastrad 	 *
   3656   1.7  riastrad 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
   3657   1.7  riastrad 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
   3658   1.7  riastrad 	 */
   3659   1.7  riastrad #ifdef CONFIG_PM
   3660   1.7  riastrad 	dev->dev->power.disable_depth++;
   3661   1.7  riastrad #endif
   3662   1.7  riastrad 	if (!amdgpu_device_has_dc_support(adev))
   3663   1.7  riastrad 		drm_helper_hpd_irq_event(dev);
   3664   1.7  riastrad 	else
   3665   1.7  riastrad 		drm_kms_helper_hotplug_event(dev);
   3666   1.7  riastrad #ifdef CONFIG_PM
   3667   1.7  riastrad 	dev->dev->power.disable_depth--;
   3668   1.7  riastrad #endif
   3669   1.7  riastrad 	adev->in_suspend = false;
   3670   1.7  riastrad 
   3671   1.7  riastrad 	return 0;
   3672   1.7  riastrad }
   3673   1.7  riastrad 
   3674   1.7  riastrad /**
   3675   1.7  riastrad  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
   3676   1.7  riastrad  *
   3677   1.7  riastrad  * @adev: amdgpu_device pointer
   3678   1.7  riastrad  *
   3679   1.7  riastrad  * The list of all the hardware IPs that make up the asic is walked and
   3680   1.7  riastrad  * the check_soft_reset callbacks are run.  check_soft_reset determines
   3681   1.7  riastrad  * if the asic is still hung or not.
   3682   1.7  riastrad  * Returns true if any of the IPs are still in a hung state, false if not.
   3683   1.7  riastrad  */
   3684   1.7  riastrad static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
   3685   1.7  riastrad {
   3686   1.7  riastrad 	int i;
   3687   1.7  riastrad 	bool asic_hang = false;
   3688   1.7  riastrad 
   3689   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   3690   1.7  riastrad 		return true;
   3691   1.7  riastrad 
   3692   1.7  riastrad 	if (amdgpu_asic_need_full_reset(adev))
   3693   1.7  riastrad 		return true;
   3694   1.7  riastrad 
   3695   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   3696   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   3697   1.7  riastrad 			continue;
   3698   1.7  riastrad 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
   3699   1.7  riastrad 			adev->ip_blocks[i].status.hang =
   3700   1.7  riastrad 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
   3701   1.7  riastrad 		if (adev->ip_blocks[i].status.hang) {
   3702   1.7  riastrad 			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
   3703   1.7  riastrad 			asic_hang = true;
   3704   1.7  riastrad 		}
   3705   1.7  riastrad 	}
   3706   1.7  riastrad 	return asic_hang;
   3707   1.7  riastrad }
   3708   1.7  riastrad 
   3709   1.7  riastrad /**
   3710   1.7  riastrad  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
   3711   1.7  riastrad  *
   3712   1.7  riastrad  * @adev: amdgpu_device pointer
   3713   1.7  riastrad  *
   3714   1.7  riastrad  * The list of all the hardware IPs that make up the asic is walked and the
   3715   1.7  riastrad  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
   3716   1.7  riastrad  * handles any IP specific hardware or software state changes that are
   3717   1.7  riastrad  * necessary for a soft reset to succeed.
   3718   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   3719   1.7  riastrad  */
   3720   1.7  riastrad static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
   3721   1.7  riastrad {
   3722   1.7  riastrad 	int i, r = 0;
   3723   1.7  riastrad 
   3724   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   3725   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   3726   1.7  riastrad 			continue;
   3727   1.7  riastrad 		if (adev->ip_blocks[i].status.hang &&
   3728   1.7  riastrad 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
   3729   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
   3730   1.7  riastrad 			if (r)
   3731   1.7  riastrad 				return r;
   3732   1.7  riastrad 		}
   3733   1.7  riastrad 	}
   3734   1.7  riastrad 
   3735   1.7  riastrad 	return 0;
   3736   1.7  riastrad }
   3737   1.7  riastrad 
   3738   1.7  riastrad /**
   3739   1.7  riastrad  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
   3740   1.7  riastrad  *
   3741   1.7  riastrad  * @adev: amdgpu_device pointer
   3742   1.7  riastrad  *
   3743   1.7  riastrad  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
   3744   1.7  riastrad  * reset is necessary to recover.
   3745   1.7  riastrad  * Returns true if a full asic reset is required, false if not.
   3746   1.7  riastrad  */
   3747   1.7  riastrad static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
   3748   1.7  riastrad {
   3749   1.7  riastrad 	int i;
   3750   1.7  riastrad 
   3751   1.7  riastrad 	if (amdgpu_asic_need_full_reset(adev))
   3752   1.7  riastrad 		return true;
   3753   1.7  riastrad 
   3754   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   3755   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   3756   1.7  riastrad 			continue;
   3757   1.7  riastrad 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
   3758   1.7  riastrad 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
   3759   1.7  riastrad 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
   3760   1.7  riastrad 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
   3761   1.7  riastrad 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
   3762   1.7  riastrad 			if (adev->ip_blocks[i].status.hang) {
   3763   1.7  riastrad 				DRM_INFO("Some block need full reset!\n");
   3764   1.7  riastrad 				return true;
   3765   1.7  riastrad 			}
   3766   1.7  riastrad 		}
   3767   1.7  riastrad 	}
   3768   1.7  riastrad 	return false;
   3769   1.7  riastrad }
   3770   1.7  riastrad 
   3771   1.7  riastrad /**
   3772   1.7  riastrad  * amdgpu_device_ip_soft_reset - do a soft reset
   3773   1.7  riastrad  *
   3774   1.7  riastrad  * @adev: amdgpu_device pointer
   3775   1.7  riastrad  *
   3776   1.7  riastrad  * The list of all the hardware IPs that make up the asic is walked and the
   3777   1.7  riastrad  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
   3778   1.7  riastrad  * IP specific hardware or software state changes that are necessary to soft
   3779   1.7  riastrad  * reset the IP.
   3780   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   3781   1.7  riastrad  */
   3782   1.7  riastrad static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
   3783   1.7  riastrad {
   3784   1.7  riastrad 	int i, r = 0;
   3785   1.7  riastrad 
   3786   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   3787   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   3788   1.7  riastrad 			continue;
   3789   1.7  riastrad 		if (adev->ip_blocks[i].status.hang &&
   3790   1.7  riastrad 		    adev->ip_blocks[i].version->funcs->soft_reset) {
   3791   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
   3792   1.7  riastrad 			if (r)
   3793   1.7  riastrad 				return r;
   3794   1.7  riastrad 		}
   3795   1.7  riastrad 	}
   3796   1.7  riastrad 
   3797   1.7  riastrad 	return 0;
   3798   1.7  riastrad }
   3799   1.7  riastrad 
   3800   1.7  riastrad /**
   3801   1.7  riastrad  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
   3802   1.7  riastrad  *
   3803   1.7  riastrad  * @adev: amdgpu_device pointer
   3804   1.7  riastrad  *
   3805   1.7  riastrad  * The list of all the hardware IPs that make up the asic is walked and the
   3806   1.7  riastrad  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
   3807   1.7  riastrad  * handles any IP specific hardware or software state changes that are
   3808   1.7  riastrad  * necessary after the IP has been soft reset.
   3809   1.7  riastrad  * Returns 0 on success, negative error code on failure.
   3810   1.7  riastrad  */
   3811   1.7  riastrad static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
   3812   1.7  riastrad {
   3813   1.7  riastrad 	int i, r = 0;
   3814   1.7  riastrad 
   3815   1.7  riastrad 	for (i = 0; i < adev->num_ip_blocks; i++) {
   3816   1.7  riastrad 		if (!adev->ip_blocks[i].status.valid)
   3817   1.7  riastrad 			continue;
   3818   1.7  riastrad 		if (adev->ip_blocks[i].status.hang &&
   3819   1.7  riastrad 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
   3820   1.7  riastrad 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
   3821   1.7  riastrad 		if (r)
   3822   1.7  riastrad 			return r;
   3823   1.7  riastrad 	}
   3824   1.7  riastrad 
   3825   1.7  riastrad 	return 0;
   3826   1.7  riastrad }
   3827   1.7  riastrad 
/**
 * amdgpu_device_recover_vram - Recover some VRAM contents
 *
 * @adev: amdgpu_device pointer
 *
 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
 * restore things like GPUVM page tables after a GPU reset where
 * the contents of VRAM might be lost.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
	struct dma_fence *fence = NULL, *next = NULL;
	struct amdgpu_bo *shadow;
	long r = 1, tmo;

	/* Under SR-IOV the copies can be delayed by world switches, so
	 * allow a much larger per-fence wait budget in that case. */
	if (amdgpu_sriov_runtime(adev))
		tmo = msecs_to_jiffies(8000);
	else
		tmo = msecs_to_jiffies(100);

	DRM_INFO("recover vram bo from shadow start\n");
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {

		/* No need to recover an evicted BO */
		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
			continue;

		/* Queue the copy from shadow (GTT) back into VRAM; next
		 * signals when that copy completes. */
		r = amdgpu_bo_restore_shadow(shadow, &next);
		if (r)
			break;

		/* Pipeline the restores: while the current copy is in
		 * flight, wait on the previous one's fence, keeping at most
		 * one outstanding wait at a time.  tmo carries the remaining
		 * wait budget across iterations. */
		if (fence) {
			tmo = dma_fence_wait_timeout(fence, false, tmo);
			dma_fence_put(fence);
			fence = next;
			if (tmo == 0) {
				r = -ETIMEDOUT;
				break;
			} else if (tmo < 0) {
				r = tmo;
				break;
			}
		} else {
			fence = next;
		}
	}
	mutex_unlock(&adev->shadow_list_lock);

	/* Wait for the final queued restore to complete. */
	if (fence)
		tmo = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);

	/* r < 0: a restore or wait failed; tmo <= 0: the last wait timed
	 * out or errored.  Either way VRAM contents are not trustworthy. */
	if (r < 0 || tmo <= 0) {
		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
		return -EIO;
	}

	DRM_INFO("recover vram bo from shadow done\n");
	return 0;
}
   3894   1.7  riastrad 
   3895   1.7  riastrad 
   3896   1.1  riastrad /**
   3897   1.7  riastrad  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
   3898   1.1  riastrad  *
   3899   1.7  riastrad  * @adev: amdgpu device pointer
   3900   1.7  riastrad  * @from_hypervisor: request from hypervisor
   3901   1.1  riastrad  *
   3902   1.7  riastrad  * do VF FLR and reinitialize Asic
   3903   1.7  riastrad  * return 0 means succeeded otherwise failed
   3904   1.1  riastrad  */
   3905   1.7  riastrad static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
   3906   1.7  riastrad 				     bool from_hypervisor)
   3907   1.1  riastrad {
   3908   1.1  riastrad 	int r;
   3909   1.1  riastrad 
   3910   1.7  riastrad 	if (from_hypervisor)
   3911   1.7  riastrad 		r = amdgpu_virt_request_full_gpu(adev, true);
   3912   1.7  riastrad 	else
   3913   1.7  riastrad 		r = amdgpu_virt_reset_gpu(adev);
   3914   1.7  riastrad 	if (r)
   3915   1.7  riastrad 		return r;
   3916   1.1  riastrad 
   3917   1.7  riastrad 	/* Resume IP prior to SMC */
   3918   1.7  riastrad 	r = amdgpu_device_ip_reinit_early_sriov(adev);
   3919   1.7  riastrad 	if (r)
   3920   1.7  riastrad 		goto error;
   3921   1.1  riastrad 
   3922   1.7  riastrad 	amdgpu_virt_init_data_exchange(adev);
   3923   1.7  riastrad 	/* we need recover gart prior to run SMC/CP/SDMA resume */
   3924   1.7  riastrad 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
   3925   1.1  riastrad 
   3926   1.7  riastrad 	r = amdgpu_device_fw_loading(adev);
   3927   1.7  riastrad 	if (r)
   3928   1.7  riastrad 		return r;
   3929   1.1  riastrad 
   3930   1.7  riastrad 	/* now we are okay to resume SMC/CP/SDMA */
   3931   1.7  riastrad 	r = amdgpu_device_ip_reinit_late_sriov(adev);
   3932   1.7  riastrad 	if (r)
   3933   1.7  riastrad 		goto error;
   3934   1.1  riastrad 
   3935   1.7  riastrad 	amdgpu_irq_gpu_reset_resume_helper(adev);
   3936   1.7  riastrad 	r = amdgpu_ib_ring_tests(adev);
   3937   1.7  riastrad 	amdgpu_amdkfd_post_reset(adev);
   3938   1.1  riastrad 
   3939   1.7  riastrad error:
   3940   1.7  riastrad 	amdgpu_virt_release_full_gpu(adev, true);
   3941   1.7  riastrad 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
   3942   1.7  riastrad 		amdgpu_inc_vram_lost(adev);
   3943   1.7  riastrad 		r = amdgpu_device_recover_vram(adev);
   3944   1.1  riastrad 	}
   3945   1.1  riastrad 
   3946   1.7  riastrad 	return r;
   3947   1.1  riastrad }
   3948   1.1  riastrad 
   3949   1.1  riastrad /**
   3950   1.7  riastrad  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
   3951   1.1  riastrad  *
   3952   1.7  riastrad  * @adev: amdgpu device pointer
   3953   1.1  riastrad  *
   3954   1.7  riastrad  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
   3955   1.7  riastrad  * a hung GPU.
   3956   1.1  riastrad  */
   3957   1.7  riastrad bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
   3958   1.1  riastrad {
   3959   1.7  riastrad 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
   3960   1.7  riastrad 		DRM_INFO("Timeout, but no hardware hang detected.\n");
   3961   1.7  riastrad 		return false;
   3962   1.7  riastrad 	}
   3963   1.7  riastrad 
   3964   1.7  riastrad 	if (amdgpu_gpu_recovery == 0)
   3965   1.7  riastrad 		goto disabled;
   3966   1.1  riastrad 
   3967   1.7  riastrad 	if (amdgpu_sriov_vf(adev))
   3968   1.7  riastrad 		return true;
   3969   1.1  riastrad 
   3970   1.7  riastrad 	if (amdgpu_gpu_recovery == -1) {
   3971   1.7  riastrad 		switch (adev->asic_type) {
   3972   1.7  riastrad 		case CHIP_BONAIRE:
   3973   1.7  riastrad 		case CHIP_HAWAII:
   3974   1.7  riastrad 		case CHIP_TOPAZ:
   3975   1.7  riastrad 		case CHIP_TONGA:
   3976   1.7  riastrad 		case CHIP_FIJI:
   3977   1.7  riastrad 		case CHIP_POLARIS10:
   3978   1.7  riastrad 		case CHIP_POLARIS11:
   3979   1.7  riastrad 		case CHIP_POLARIS12:
   3980   1.7  riastrad 		case CHIP_VEGAM:
   3981   1.7  riastrad 		case CHIP_VEGA20:
   3982   1.7  riastrad 		case CHIP_VEGA10:
   3983   1.7  riastrad 		case CHIP_VEGA12:
   3984   1.7  riastrad 		case CHIP_RAVEN:
   3985   1.7  riastrad 		case CHIP_ARCTURUS:
   3986   1.7  riastrad 		case CHIP_RENOIR:
   3987   1.7  riastrad 		case CHIP_NAVI10:
   3988   1.7  riastrad 		case CHIP_NAVI14:
   3989   1.7  riastrad 		case CHIP_NAVI12:
   3990   1.7  riastrad 			break;
   3991   1.7  riastrad 		default:
   3992   1.7  riastrad 			goto disabled;
   3993   1.1  riastrad 		}
   3994   1.1  riastrad 	}
   3995   1.1  riastrad 
   3996   1.7  riastrad 	return true;
   3997   1.7  riastrad 
   3998   1.7  riastrad disabled:
   3999   1.7  riastrad 		DRM_INFO("GPU recovery disabled.\n");
   4000   1.7  riastrad 		return false;
   4001   1.7  riastrad }
   4002   1.7  riastrad 
   4003   1.7  riastrad 
/**
 * amdgpu_device_pre_asic_reset - prepare one device before an ASIC reset
 *
 * @adev: amdgpu device pointer
 * @job: job that triggered the hang, or NULL for a manual/peer reset
 * @need_full_reset_arg: in/out; set to true when a full ASIC reset is needed
 *
 * Force-completes outstanding hardware fences, punishes the guilty job,
 * and on bare metal first attempts a cheap per-IP soft reset, falling back
 * to suspending the IP blocks in preparation for a full ASIC reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
					struct amdgpu_job *job,
					bool *need_full_reset_arg)
{
	int i, r = 0;
	bool need_full_reset  = *need_full_reset_arg;

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		/* Rings may be sparse; only touch ones with a live scheduler. */
		if (!ring || !ring->sched.thread)
			continue;

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	/* Raise the guilty job's karma so repeat offenders get dropped. */
	if(job)
		drm_sched_increase_karma(&job->base);

	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset) {
			/* Try the cheaper per-IP soft reset first; if it
			 * fails or some block is still hung afterwards,
			 * escalate to a full ASIC reset. */
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				DRM_INFO("soft reset failed, will fallback to full reset!\n");
				need_full_reset = true;
			}
		}

		/* Full reset requires the IP blocks to be suspended first. */
		if (need_full_reset)
			r = amdgpu_device_ip_suspend(adev);

		*need_full_reset_arg = need_full_reset;
	}

	return r;
}
   4049   1.7  riastrad 
/**
 * amdgpu_do_asic_reset - perform the actual ASIC reset and bring-up
 *
 * @hive: XGMI hive the devices belong to, or NULL for a single device
 * @device_list_handle: list of devices (gmc.xgmi.head) to reset together
 * @need_full_reset_arg: in/out; whether a full ASIC reset is (still) needed
 *
 * Resets all devices on the list (in parallel for XGMI hives), then
 * re-initializes each one: vBIOS post, phase-1/2 IP resume, GART and
 * firmware reload, VRAM-lost handling, RAS resume and IB ring tests.
 * Returns 0 on success; -EAGAIN requests the caller retry with a full
 * reset; other negative error codes on failure.
 */
static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
			       struct list_head *device_list_handle,
			       bool *need_full_reset_arg)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
	int r = 0;

	/*
	 * ASIC reset has to be done on all HGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
					 r, tmp_adev->ddev->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceed */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle,
					    gmc.xgmi.head) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					/* Per-device result recorded by the
					 * xgmi_reset_work handler. */
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	/* RAS interrupt state is cleared once the reset went through. */
	if (!r && amdgpu_ras_intr_triggered())
		amdgpu_ras_intr_cleared();

	/* Bring every device back up and run post-reset sanity tests. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (need_full_reset) {
			/* post card */
			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
				DRM_WARN("asic atom init failed!");

			if (!r) {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				/* GART must be restored before SMC/CP/SDMA resume. */
				r = amdgpu_gtt_mgr_recover(
					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
				if (r)
					goto out;

				/* NOTE(review): this bare return bypasses the
				 * per-device asic_reset_res bookkeeping below,
				 * unlike the goto-out paths — looks intentional
				 * upstream but worth confirming. */
				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC as tracked as reset was already
				 * complete successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				/* must succeed. */
				amdgpu_ras_resume(tmp_adev);

				/* Update PSP FW topology after reset */
				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
			}
		}


out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			/* IB tests are the final proof the GPU is alive. */
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = amdgpu_device_ip_suspend(tmp_adev);
				need_full_reset = true;
				/* Ask the caller to retry with a full reset. */
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	*need_full_reset_arg = need_full_reset;
	return r;
}
   4172   1.1  riastrad 
   4173   1.7  riastrad static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
   4174   1.7  riastrad {
   4175   1.7  riastrad 	if (trylock) {
   4176   1.7  riastrad 		if (!mutex_trylock(&adev->lock_reset))
   4177   1.7  riastrad 			return false;
   4178   1.7  riastrad 	} else
   4179   1.7  riastrad 		mutex_lock(&adev->lock_reset);
   4180   1.1  riastrad 
   4181   1.7  riastrad 	atomic_inc(&adev->gpu_reset_counter);
   4182   1.7  riastrad 	adev->in_gpu_reset = true;
   4183   1.7  riastrad 	switch (amdgpu_asic_reset_method(adev)) {
   4184   1.7  riastrad 	case AMD_RESET_METHOD_MODE1:
   4185   1.7  riastrad 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
   4186   1.7  riastrad 		break;
   4187   1.7  riastrad 	case AMD_RESET_METHOD_MODE2:
   4188   1.7  riastrad 		adev->mp1_state = PP_MP1_STATE_RESET;
   4189   1.7  riastrad 		break;
   4190   1.7  riastrad 	default:
   4191   1.7  riastrad 		adev->mp1_state = PP_MP1_STATE_NONE;
   4192   1.7  riastrad 		break;
   4193   1.1  riastrad 	}
   4194   1.1  riastrad 
   4195   1.7  riastrad 	return true;
   4196   1.7  riastrad }
   4197   1.7  riastrad 
   4198   1.7  riastrad static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
   4199   1.7  riastrad {
   4200   1.7  riastrad 	amdgpu_vf_error_trans_all(adev);
   4201   1.7  riastrad 	adev->mp1_state = PP_MP1_STATE_NONE;
   4202   1.7  riastrad 	adev->in_gpu_reset = false;
   4203   1.7  riastrad 	mutex_unlock(&adev->lock_reset);
   4204   1.1  riastrad }
   4205   1.1  riastrad 
   4206   1.1  riastrad /**
   4207   1.7  riastrad  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
   4208   1.1  riastrad  *
   4209   1.1  riastrad  * @adev: amdgpu device pointer
   4210   1.7  riastrad  * @job: which job trigger hang
   4211   1.1  riastrad  *
   4212   1.7  riastrad  * Attempt to reset the GPU if it has hung (all asics).
   4213   1.7  riastrad  * Attempt to do soft-reset or full-reset and reinitialize Asic
   4214   1.1  riastrad  * Returns 0 for success or an error on failure.
   4215   1.1  riastrad  */
   4216   1.7  riastrad 
   4217   1.7  riastrad int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
   4218   1.7  riastrad 			      struct amdgpu_job *job)
   4219   1.1  riastrad {
   4220   1.7  riastrad 	struct list_head device_list, *device_list_handle =  NULL;
   4221   1.7  riastrad 	bool need_full_reset, job_signaled;
   4222   1.7  riastrad 	struct amdgpu_hive_info *hive = NULL;
   4223   1.7  riastrad 	struct amdgpu_device *tmp_adev = NULL;
   4224   1.7  riastrad 	int i, r = 0;
   4225   1.7  riastrad 	bool in_ras_intr = amdgpu_ras_intr_triggered();
   4226   1.7  riastrad 	bool use_baco =
   4227   1.7  riastrad 		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
   4228   1.7  riastrad 		true : false;
   4229   1.7  riastrad 
   4230   1.7  riastrad 	/*
   4231   1.7  riastrad 	 * Flush RAM to disk so that after reboot
   4232   1.7  riastrad 	 * the user can read log and see why the system rebooted.
   4233   1.7  riastrad 	 */
   4234   1.7  riastrad 	if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
   4235   1.7  riastrad 
   4236   1.7  riastrad 		DRM_WARN("Emergency reboot.");
   4237   1.7  riastrad 
   4238   1.7  riastrad 		ksys_sync_helper();
   4239   1.7  riastrad 		emergency_restart();
   4240   1.7  riastrad 	}
   4241   1.1  riastrad 
   4242   1.7  riastrad 	need_full_reset = job_signaled = false;
   4243   1.7  riastrad 	INIT_LIST_HEAD(&device_list);
   4244   1.1  riastrad 
   4245   1.7  riastrad 	dev_info(adev->dev, "GPU %s begin!\n",
   4246   1.7  riastrad 		(in_ras_intr && !use_baco) ? "jobs stop":"reset");
   4247   1.7  riastrad 
   4248   1.7  riastrad 	cancel_delayed_work_sync(&adev->delayed_init_work);
   4249   1.1  riastrad 
   4250   1.7  riastrad 	hive = amdgpu_get_xgmi_hive(adev, false);
   4251   1.1  riastrad 
   4252   1.7  riastrad 	/*
   4253   1.7  riastrad 	 * Here we trylock to avoid chain of resets executing from
   4254   1.7  riastrad 	 * either trigger by jobs on different adevs in XGMI hive or jobs on
   4255   1.7  riastrad 	 * different schedulers for same device while this TO handler is running.
   4256   1.7  riastrad 	 * We always reset all schedulers for device and all devices for XGMI
   4257   1.7  riastrad 	 * hive so that should take care of them too.
   4258   1.7  riastrad 	 */
   4259   1.1  riastrad 
   4260   1.7  riastrad 	if (hive && !mutex_trylock(&hive->reset_lock)) {
   4261  1.10  riastrad 		DRM_INFO("Bailing on TDR for s_job:%"PRIx64", hive: %"PRIx64" as another already in progress",
   4262   1.7  riastrad 			  job ? job->base.id : -1, hive->hive_id);
   4263   1.7  riastrad 		return 0;
   4264   1.7  riastrad 	}
   4265   1.1  riastrad 
   4266   1.7  riastrad 	/* Start with adev pre asic reset first for soft reset check.*/
   4267   1.7  riastrad 	if (!amdgpu_device_lock_adev(adev, !hive)) {
   4268  1.10  riastrad 		DRM_INFO("Bailing on TDR for s_job:%"PRIx64", as another already in progress",
   4269   1.7  riastrad 			  job ? job->base.id : -1);
   4270   1.7  riastrad 		return 0;
   4271   1.7  riastrad 	}
   4272   1.1  riastrad 
   4273   1.7  riastrad 	/* Block kfd: SRIOV would do it separately */
   4274   1.7  riastrad 	if (!amdgpu_sriov_vf(adev))
   4275   1.7  riastrad                 amdgpu_amdkfd_pre_reset(adev);
   4276   1.7  riastrad 
   4277   1.7  riastrad 	/* Build list of devices to reset */
   4278   1.7  riastrad 	if  (adev->gmc.xgmi.num_physical_nodes > 1) {
   4279   1.7  riastrad 		if (!hive) {
   4280   1.7  riastrad 			/*unlock kfd: SRIOV would do it separately */
   4281   1.7  riastrad 			if (!amdgpu_sriov_vf(adev))
   4282   1.7  riastrad 		                amdgpu_amdkfd_post_reset(adev);
   4283   1.7  riastrad 			amdgpu_device_unlock_adev(adev);
   4284   1.7  riastrad 			return -ENODEV;
   4285   1.1  riastrad 		}
   4286   1.7  riastrad 
   4287   1.7  riastrad 		/*
   4288   1.7  riastrad 		 * In case we are in XGMI hive mode device reset is done for all the
   4289   1.7  riastrad 		 * nodes in the hive to retrain all XGMI links and hence the reset
   4290   1.7  riastrad 		 * sequence is executed in loop on all nodes.
   4291   1.7  riastrad 		 */
   4292   1.7  riastrad 		device_list_handle = &hive->device_list;
   4293   1.7  riastrad 	} else {
   4294   1.7  riastrad 		list_add_tail(&adev->gmc.xgmi.head, &device_list);
   4295   1.7  riastrad 		device_list_handle = &device_list;
   4296   1.1  riastrad 	}
   4297   1.1  riastrad 
   4298   1.7  riastrad 	/* block all schedulers and reset given job's ring */
   4299   1.7  riastrad 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
   4300   1.7  riastrad 		if (tmp_adev != adev) {
   4301   1.7  riastrad 			amdgpu_device_lock_adev(tmp_adev, false);
   4302   1.7  riastrad 			if (!amdgpu_sriov_vf(tmp_adev))
   4303   1.7  riastrad 			                amdgpu_amdkfd_pre_reset(tmp_adev);
   4304   1.7  riastrad 		}
   4305   1.7  riastrad 
   4306   1.7  riastrad 		/*
   4307   1.7  riastrad 		 * Mark these ASICs to be reseted as untracked first
   4308   1.7  riastrad 		 * And add them back after reset completed
   4309   1.7  riastrad 		 */
   4310   1.7  riastrad 		amdgpu_unregister_gpu_instance(tmp_adev);
   4311   1.7  riastrad 
   4312   1.7  riastrad 		/* disable ras on ALL IPs */
   4313   1.7  riastrad 		if (!(in_ras_intr && !use_baco) &&
   4314   1.7  riastrad 		      amdgpu_device_ip_need_full_reset(tmp_adev))
   4315   1.7  riastrad 			amdgpu_ras_suspend(tmp_adev);
   4316   1.1  riastrad 
   4317   1.1  riastrad 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
   4318   1.7  riastrad 			struct amdgpu_ring *ring = tmp_adev->rings[i];
   4319   1.7  riastrad 
   4320   1.7  riastrad 			if (!ring || !ring->sched.thread)
   4321   1.1  riastrad 				continue;
   4322   1.1  riastrad 
   4323   1.7  riastrad 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
   4324   1.7  riastrad 
   4325   1.7  riastrad 			if (in_ras_intr && !use_baco)
   4326   1.7  riastrad 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
   4327   1.1  riastrad 		}
   4328   1.7  riastrad 	}
   4329   1.1  riastrad 
   4330   1.7  riastrad 
   4331   1.7  riastrad 	if (in_ras_intr && !use_baco)
   4332   1.7  riastrad 		goto skip_sched_resume;
   4333   1.7  riastrad 
   4334   1.7  riastrad 	/*
   4335   1.7  riastrad 	 * Must check guilty signal here since after this point all old
   4336   1.7  riastrad 	 * HW fences are force signaled.
   4337   1.7  riastrad 	 *
   4338   1.7  riastrad 	 * job->base holds a reference to parent fence
   4339   1.7  riastrad 	 */
   4340   1.7  riastrad 	if (job && job->base.s_fence->parent &&
   4341   1.7  riastrad 	    dma_fence_is_signaled(job->base.s_fence->parent))
   4342   1.7  riastrad 		job_signaled = true;
   4343   1.7  riastrad 
   4344   1.7  riastrad 	if (job_signaled) {
   4345   1.7  riastrad 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
   4346   1.7  riastrad 		goto skip_hw_reset;
   4347   1.1  riastrad 	}
   4348   1.1  riastrad 
   4349   1.1  riastrad 
   4350   1.7  riastrad 	/* Guilty job will be freed after this*/
   4351   1.7  riastrad 	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
   4352   1.1  riastrad 	if (r) {
   4353   1.7  riastrad 		/*TODO Should we stop ?*/
   4354   1.7  riastrad 		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
   4355   1.7  riastrad 			  r, adev->ddev->unique);
   4356   1.7  riastrad 		adev->asic_reset_res = r;
   4357   1.1  riastrad 	}
   4358   1.1  riastrad 
   4359   1.7  riastrad retry:	/* Rest of adevs pre asic reset from XGMI hive. */
   4360   1.7  riastrad 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
   4361   1.1  riastrad 
   4362   1.7  riastrad 		if (tmp_adev == adev)
   4363   1.7  riastrad 			continue;
   4364   1.1  riastrad 
   4365   1.7  riastrad 		r = amdgpu_device_pre_asic_reset(tmp_adev,
   4366   1.7  riastrad 						 NULL,
   4367   1.7  riastrad 						 &need_full_reset);
   4368   1.7  riastrad 		/*TODO Should we stop ?*/
   4369   1.7  riastrad 		if (r) {
   4370   1.7  riastrad 			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
   4371   1.7  riastrad 				  r, tmp_adev->ddev->unique);
   4372   1.7  riastrad 			tmp_adev->asic_reset_res = r;
   4373   1.1  riastrad 		}
   4374   1.1  riastrad 	}
   4375   1.1  riastrad 
   4376   1.7  riastrad 	/* Actual ASIC resets if needed.*/
   4377   1.7  riastrad 	/* TODO Implement XGMI hive reset logic for SRIOV */
   4378   1.7  riastrad 	if (amdgpu_sriov_vf(adev)) {
   4379   1.7  riastrad 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
   4380   1.7  riastrad 		if (r)
   4381   1.7  riastrad 			adev->asic_reset_res = r;
   4382   1.7  riastrad 	} else {
   4383   1.7  riastrad 		r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
   4384   1.7  riastrad 		if (r && r == -EAGAIN)
   4385   1.7  riastrad 			goto retry;
   4386   1.1  riastrad 	}
   4387   1.1  riastrad 
   4388   1.7  riastrad skip_hw_reset:
   4389   1.1  riastrad 
   4390   1.7  riastrad 	/* Post ASIC reset for all devs .*/
   4391   1.7  riastrad 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
   4392   1.1  riastrad 
   4393   1.7  riastrad 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
   4394   1.7  riastrad 			struct amdgpu_ring *ring = tmp_adev->rings[i];
   4395   1.1  riastrad 
   4396   1.7  riastrad 			if (!ring || !ring->sched.thread)
   4397   1.7  riastrad 				continue;
   4398   1.1  riastrad 
   4399   1.7  riastrad 			/* No point to resubmit jobs if we didn't HW reset*/
   4400   1.7  riastrad 			if (!tmp_adev->asic_reset_res && !job_signaled)
   4401   1.7  riastrad 				drm_sched_resubmit_jobs(&ring->sched);
   4402   1.1  riastrad 
   4403   1.7  riastrad 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
   4404   1.7  riastrad 		}
   4405   1.1  riastrad 
   4406   1.7  riastrad 		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
   4407   1.7  riastrad 			drm_helper_resume_force_mode(tmp_adev->ddev);
   4408   1.7  riastrad 		}
   4409   1.1  riastrad 
   4410   1.7  riastrad 		tmp_adev->asic_reset_res = 0;
   4411   1.1  riastrad 
   4412   1.7  riastrad 		if (r) {
   4413   1.7  riastrad 			/* bad news, how to tell it to userspace ? */
   4414   1.7  riastrad 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
   4415   1.7  riastrad 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
   4416   1.7  riastrad 		} else {
   4417   1.7  riastrad 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
   4418   1.7  riastrad 		}
   4419   1.1  riastrad 	}
   4420   1.1  riastrad 
   4421   1.7  riastrad skip_sched_resume:
   4422   1.7  riastrad 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
   4423   1.7  riastrad 		/*unlock kfd: SRIOV would do it separately */
   4424   1.7  riastrad 		if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
   4425   1.7  riastrad 	                amdgpu_amdkfd_post_reset(tmp_adev);
   4426   1.7  riastrad 		amdgpu_device_unlock_adev(tmp_adev);
   4427   1.7  riastrad 	}
   4428   1.1  riastrad 
   4429   1.7  riastrad 	if (hive)
   4430   1.7  riastrad 		mutex_unlock(&hive->reset_lock);
   4431   1.1  riastrad 
   4432   1.7  riastrad 	if (r)
   4433   1.7  riastrad 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
   4434   1.7  riastrad 	return r;
   4435   1.7  riastrad }
   4436   1.1  riastrad 
/**
 * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 *
 * Results are stored in adev->pm.pcie_gen_mask (CAIL_*_PCIE_LINK_SPEED_*
 * bits) and adev->pm.pcie_mlw_mask (CAIL_PCIE_LINK_WIDTH_* bits).  A mask
 * that is already non-zero on entry is left untouched, so module-parameter
 * overrides win over autodetection.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	/* Module parameters, when set, force the masks outright. */
	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		/* No upstream PCIE link to probe; fall back to defaults. */
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	/* Both masks already set (e.g. by module parameters): nothing to do. */
	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	/* Query the slowest link between the device and the root complex. */
	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			/* Config space unreadable (e.g. SR-IOV VF): assume gen1-3. */
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			/* Each detected speed implies all the slower gens too. */
			if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			/* Platform link speed unknown: conservatively assume gen1-2. */
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;

		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			/* A wider link implies support for every narrower width. */
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
   4572   1.1  riastrad 
   4573   1.7  riastrad int amdgpu_device_baco_enter(struct drm_device *dev)
   4574   1.7  riastrad {
   4575   1.7  riastrad 	struct amdgpu_device *adev = dev->dev_private;
   4576   1.7  riastrad 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
   4577   1.1  riastrad 
   4578   1.7  riastrad 	if (!amdgpu_device_supports_baco(adev->ddev))
   4579   1.7  riastrad 		return -ENOTSUPP;
   4580   1.1  riastrad 
   4581   1.7  riastrad 	if (ras && ras->supported)
   4582   1.7  riastrad 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
   4583   1.1  riastrad 
   4584   1.7  riastrad 	return amdgpu_dpm_baco_enter(adev);
   4585   1.1  riastrad }
   4586   1.1  riastrad 
   4587   1.7  riastrad int amdgpu_device_baco_exit(struct drm_device *dev)
   4588   1.1  riastrad {
   4589   1.7  riastrad 	struct amdgpu_device *adev = dev->dev_private;
   4590   1.7  riastrad 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
   4591   1.7  riastrad 	int ret = 0;
   4592   1.7  riastrad 
   4593   1.7  riastrad 	if (!amdgpu_device_supports_baco(adev->ddev))
   4594   1.7  riastrad 		return -ENOTSUPP;
   4595   1.7  riastrad 
   4596   1.7  riastrad 	ret = amdgpu_dpm_baco_exit(adev);
   4597   1.7  riastrad 	if (ret)
   4598   1.7  riastrad 		return ret;
   4599   1.1  riastrad 
   4600   1.7  riastrad 	if (ras && ras->supported)
   4601   1.7  riastrad 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
   4602   1.1  riastrad 
   4603   1.1  riastrad 	return 0;
   4604   1.1  riastrad }
   4605