      1 /*	$NetBSD: radeon_device.c,v 1.1.1.3 2021/12/18 20:15:48 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2008 Advanced Micro Devices, Inc.
      5  * Copyright 2008 Red Hat Inc.
      6  * Copyright 2009 Jerome Glisse.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24  * OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  * Authors: Dave Airlie
     27  *          Alex Deucher
     28  *          Jerome Glisse
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.1.1.3 2021/12/18 20:15:48 riastradh Exp $");
     33 
     34 #include <linux/console.h>
     35 #include <linux/efi.h>
     36 #include <linux/pci.h>
     37 #include <linux/pm_runtime.h>
     38 #include <linux/slab.h>
     39 #include <linux/vga_switcheroo.h>
     40 #include <linux/vgaarb.h>
     41 
     42 #include <drm/drm_cache.h>
     43 #include <drm/drm_crtc_helper.h>
     44 #include <drm/drm_debugfs.h>
     45 #include <drm/drm_device.h>
     46 #include <drm/drm_file.h>
     47 #include <drm/drm_probe_helper.h>
     48 #include <drm/radeon_drm.h>
     49 
     50 #include "radeon_reg.h"
     51 #include "radeon.h"
     52 #include "atom.h"
     53 
     54 static const char radeon_family_name[][16] = {
     55 	"R100",
     56 	"RV100",
     57 	"RS100",
     58 	"RV200",
     59 	"RS200",
     60 	"R200",
     61 	"RV250",
     62 	"RS300",
     63 	"RV280",
     64 	"R300",
     65 	"R350",
     66 	"RV350",
     67 	"RV380",
     68 	"R420",
     69 	"R423",
     70 	"RV410",
     71 	"RS400",
     72 	"RS480",
     73 	"RS600",
     74 	"RS690",
     75 	"RS740",
     76 	"RV515",
     77 	"R520",
     78 	"RV530",
     79 	"RV560",
     80 	"RV570",
     81 	"R580",
     82 	"R600",
     83 	"RV610",
     84 	"RV630",
     85 	"RV670",
     86 	"RV620",
     87 	"RV635",
     88 	"RS780",
     89 	"RS880",
     90 	"RV770",
     91 	"RV730",
     92 	"RV710",
     93 	"RV740",
     94 	"CEDAR",
     95 	"REDWOOD",
     96 	"JUNIPER",
     97 	"CYPRESS",
     98 	"HEMLOCK",
     99 	"PALM",
    100 	"SUMO",
    101 	"SUMO2",
    102 	"BARTS",
    103 	"TURKS",
    104 	"CAICOS",
    105 	"CAYMAN",
    106 	"ARUBA",
    107 	"TAHITI",
    108 	"PITCAIRN",
    109 	"VERDE",
    110 	"OLAND",
    111 	"HAINAN",
    112 	"BONAIRE",
    113 	"KAVERI",
    114 	"KABINI",
    115 	"HAWAII",
    116 	"MULLINS",
    117 	"LAST",
    118 };
    119 
    120 #if defined(CONFIG_VGA_SWITCHEROO)
    121 bool radeon_has_atpx_dgpu_power_cntl(void);
    122 bool radeon_is_atpx_hybrid(void);
    123 #else
    124 static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
    125 static inline bool radeon_is_atpx_hybrid(void) { return false; }
    126 #endif
    127 
    128 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
    129 
    130 struct radeon_px_quirk {
    131 	u32 chip_vendor;
    132 	u32 chip_device;
    133 	u32 subsys_vendor;
    134 	u32 subsys_device;
    135 	u32 px_quirk_flags;
    136 };
    137 
    138 static struct radeon_px_quirk radeon_px_quirk_list[] = {
    139 	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
    140 	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
    141 	 */
    142 	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
    143 	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
    144 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
    145 	 */
    146 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
    147 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    148 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
    149 	 */
    150 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    151 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    152 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
    153 	 */
    154 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    155 	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    156 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
    157 	 */
    158 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
    159 	{ 0, 0, 0, 0, 0 },
    160 };
    161 
    162 bool radeon_is_px(struct drm_device *dev)
    163 {
    164 	struct radeon_device *rdev = dev->dev_private;
    165 
    166 	if (rdev->flags & RADEON_IS_PX)
    167 		return true;
    168 	return false;
    169 }
    170 
    171 static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
    172 {
    173 	struct radeon_px_quirk *p = radeon_px_quirk_list;
    174 
    175 	/* Apply PX quirks */
    176 	while (p && p->chip_device != 0) {
    177 		if (rdev->pdev->vendor == p->chip_vendor &&
    178 		    rdev->pdev->device == p->chip_device &&
    179 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
    180 		    rdev->pdev->subsystem_device == p->subsys_device) {
    181 			rdev->px_quirk_flags = p->px_quirk_flags;
    182 			break;
    183 		}
    184 		++p;
    185 	}
    186 
    187 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
    188 		rdev->flags &= ~RADEON_IS_PX;
    189 
     190 	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
    191 	if (!radeon_is_atpx_hybrid() &&
    192 	    !radeon_has_atpx_dgpu_power_cntl())
    193 		rdev->flags &= ~RADEON_IS_PX;
    194 }
    195 
    196 /**
    197  * radeon_program_register_sequence - program an array of registers.
    198  *
    199  * @rdev: radeon_device pointer
    200  * @registers: pointer to the register array
    201  * @array_size: size of the register array
    202  *
     203  * Programs an array of registers with AND and OR masks.
    204  * This is a helper for setting golden registers.
    205  */
    206 void radeon_program_register_sequence(struct radeon_device *rdev,
    207 				      const u32 *registers,
    208 				      const u32 array_size)
    209 {
    210 	u32 tmp, reg, and_mask, or_mask;
    211 	int i;
    212 
    213 	if (array_size % 3)
    214 		return;
    215 
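         	/*
         	 * The registers[] array is packed as {offset, and_mask, or_mask}
         	 * triples, hence the stride of 3 below.
         	 */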
     216 	for (i = 0; i < array_size; i += 3) {
    217 		reg = registers[i + 0];
    218 		and_mask = registers[i + 1];
    219 		or_mask = registers[i + 2];
    220 
    221 		if (and_mask == 0xffffffff) {
    222 			tmp = or_mask;
    223 		} else {
    224 			tmp = RREG32(reg);
    225 			tmp &= ~and_mask;
    226 			tmp |= or_mask;
    227 		}
    228 		WREG32(reg, tmp);
    229 	}
    230 }
    231 
    232 void radeon_pci_config_reset(struct radeon_device *rdev)
    233 {
    234 	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
    235 }
    236 
    237 /**
    238  * radeon_surface_init - Clear GPU surface registers.
    239  *
    240  * @rdev: radeon_device pointer
    241  *
    242  * Clear GPU surface registers (r1xx-r5xx).
    243  */
    244 void radeon_surface_init(struct radeon_device *rdev)
    245 {
    246 	/* FIXME: check this out */
    247 	if (rdev->family < CHIP_R600) {
    248 		int i;
    249 
    250 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
    251 			if (rdev->surface_regs[i].bo)
    252 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
    253 			else
    254 				radeon_clear_surface_reg(rdev, i);
    255 		}
    256 		/* enable surfaces */
    257 		WREG32(RADEON_SURFACE_CNTL, 0);
    258 	}
    259 }
    260 
    261 /*
    262  * GPU scratch registers helpers function.
    263  */
    264 /**
    265  * radeon_scratch_init - Init scratch register driver information.
    266  *
    267  * @rdev: radeon_device pointer
    268  *
    269  * Init CP scratch register driver information (r1xx-r5xx)
    270  */
    271 void radeon_scratch_init(struct radeon_device *rdev)
    272 {
    273 	int i;
    274 
    275 	/* FIXME: check this out */
    276 	if (rdev->family < CHIP_R300) {
    277 		rdev->scratch.num_reg = 5;
    278 	} else {
    279 		rdev->scratch.num_reg = 7;
    280 	}
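         	/* CP scratch registers live at consecutive 4-byte offsets
         	 * starting at RADEON_SCRATCH_REG0. */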
    281 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    282 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    283 		rdev->scratch.free[i] = true;
    284 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    285 	}
    286 }
    287 
    288 /**
    289  * radeon_scratch_get - Allocate a scratch register
    290  *
    291  * @rdev: radeon_device pointer
    292  * @reg: scratch register mmio offset
    293  *
    294  * Allocate a CP scratch register for use by the driver (all asics).
    295  * Returns 0 on success or -EINVAL on failure.
    296  */
    297 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
    298 {
    299 	int i;
    300 
    301 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    302 		if (rdev->scratch.free[i]) {
    303 			rdev->scratch.free[i] = false;
    304 			*reg = rdev->scratch.reg[i];
    305 			return 0;
    306 		}
    307 	}
    308 	return -EINVAL;
    309 }
    310 
    311 /**
    312  * radeon_scratch_free - Free a scratch register
    313  *
    314  * @rdev: radeon_device pointer
    315  * @reg: scratch register mmio offset
    316  *
    317  * Free a CP scratch register allocated for use by the driver (all asics)
    318  */
    319 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
    320 {
    321 	int i;
    322 
    323 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    324 		if (rdev->scratch.reg[i] == reg) {
    325 			rdev->scratch.free[i] = true;
    326 			return;
    327 		}
    328 	}
    329 }
    330 
    331 /*
    332  * GPU doorbell aperture helpers function.
    333  */
    334 /**
    335  * radeon_doorbell_init - Init doorbell driver information.
    336  *
    337  * @rdev: radeon_device pointer
    338  *
    339  * Init doorbell driver information (CIK)
    340  * Returns 0 on success, error on failure.
    341  */
    342 static int radeon_doorbell_init(struct radeon_device *rdev)
    343 {
    344 	/* doorbell bar mapping */
    345 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
    346 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
    347 
    348 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
    349 	if (rdev->doorbell.num_doorbells == 0)
    350 		return -EINVAL;
    351 
    352 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
    353 	if (rdev->doorbell.ptr == NULL) {
    354 		return -ENOMEM;
    355 	}
    356 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
    357 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
    358 
    359 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
    360 
    361 	return 0;
    362 }
    363 
    364 /**
    365  * radeon_doorbell_fini - Tear down doorbell driver information.
    366  *
    367  * @rdev: radeon_device pointer
    368  *
    369  * Tear down doorbell driver information (CIK)
    370  */
    371 static void radeon_doorbell_fini(struct radeon_device *rdev)
    372 {
    373 	iounmap(rdev->doorbell.ptr);
    374 	rdev->doorbell.ptr = NULL;
    375 }
    376 
    377 /**
    378  * radeon_doorbell_get - Allocate a doorbell entry
    379  *
    380  * @rdev: radeon_device pointer
    381  * @doorbell: doorbell index
    382  *
    383  * Allocate a doorbell for use by the driver (all asics).
    384  * Returns 0 on success or -EINVAL on failure.
    385  */
    386 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
    387 {
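         	/* Doorbell slots are tracked in the doorbell.used bitmap; hand out
         	 * the first free slot, if any. */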
    388 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
    389 	if (offset < rdev->doorbell.num_doorbells) {
    390 		__set_bit(offset, rdev->doorbell.used);
    391 		*doorbell = offset;
    392 		return 0;
    393 	} else {
    394 		return -EINVAL;
    395 	}
    396 }
    397 
    398 /**
    399  * radeon_doorbell_free - Free a doorbell entry
    400  *
    401  * @rdev: radeon_device pointer
    402  * @doorbell: doorbell index
    403  *
    404  * Free a doorbell allocated for use by the driver (all asics)
    405  */
    406 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
    407 {
    408 	if (doorbell < rdev->doorbell.num_doorbells)
    409 		__clear_bit(doorbell, rdev->doorbell.used);
    410 }
    411 
    412 /*
    413  * radeon_wb_*()
     414  * Writeback is the method by which the GPU updates special pages
    415  * in memory with the status of certain GPU events (fences, ring pointers,
    416  * etc.).
    417  */
    418 
    419 /**
    420  * radeon_wb_disable - Disable Writeback
    421  *
    422  * @rdev: radeon_device pointer
    423  *
    424  * Disables Writeback (all asics).  Used for suspend.
    425  */
    426 void radeon_wb_disable(struct radeon_device *rdev)
    427 {
    428 	rdev->wb.enabled = false;
    429 }
    430 
    431 /**
    432  * radeon_wb_fini - Disable Writeback and free memory
    433  *
    434  * @rdev: radeon_device pointer
    435  *
    436  * Disables Writeback and frees the Writeback memory (all asics).
    437  * Used at driver shutdown.
    438  */
    439 void radeon_wb_fini(struct radeon_device *rdev)
    440 {
    441 	radeon_wb_disable(rdev);
    442 	if (rdev->wb.wb_obj) {
    443 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
    444 			radeon_bo_kunmap(rdev->wb.wb_obj);
    445 			radeon_bo_unpin(rdev->wb.wb_obj);
    446 			radeon_bo_unreserve(rdev->wb.wb_obj);
    447 		}
    448 		radeon_bo_unref(&rdev->wb.wb_obj);
    449 		rdev->wb.wb = NULL;
    450 		rdev->wb.wb_obj = NULL;
    451 	}
    452 }
    453 
    454 /**
     455  * radeon_wb_init - Init Writeback driver info and allocate memory
    456  *
    457  * @rdev: radeon_device pointer
    458  *
     459  * Initializes Writeback and allocates the Writeback memory (all asics).
     460  * Used at driver startup.
     461  * Returns 0 on success or a negative error code on failure.
    462  */
    463 int radeon_wb_init(struct radeon_device *rdev)
    464 {
    465 	int r;
    466 
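         	/* The writeback buffer is a single GPU page pinned in GTT; the GPU
         	 * writes fence values, ring pointers, etc. here so the driver can
         	 * read them without going through MMIO registers. */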
    467 	if (rdev->wb.wb_obj == NULL) {
    468 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
    469 				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
    470 				     &rdev->wb.wb_obj);
    471 		if (r) {
    472 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
    473 			return r;
    474 		}
    475 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
    476 		if (unlikely(r != 0)) {
    477 			radeon_wb_fini(rdev);
    478 			return r;
    479 		}
    480 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
    481 				&rdev->wb.gpu_addr);
    482 		if (r) {
    483 			radeon_bo_unreserve(rdev->wb.wb_obj);
    484 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
    485 			radeon_wb_fini(rdev);
    486 			return r;
    487 		}
    488 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
    489 		radeon_bo_unreserve(rdev->wb.wb_obj);
    490 		if (r) {
    491 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
    492 			radeon_wb_fini(rdev);
    493 			return r;
    494 		}
    495 	}
    496 
    497 	/* clear wb memory */
    498 	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
    499 	/* disable event_write fences */
    500 	rdev->wb.use_event = false;
    501 	/* disabled via module param */
    502 	if (radeon_no_wb == 1) {
    503 		rdev->wb.enabled = false;
    504 	} else {
    505 		if (rdev->flags & RADEON_IS_AGP) {
    506 			/* often unreliable on AGP */
    507 			rdev->wb.enabled = false;
    508 		} else if (rdev->family < CHIP_R300) {
    509 			/* often unreliable on pre-r300 */
    510 			rdev->wb.enabled = false;
    511 		} else {
    512 			rdev->wb.enabled = true;
    513 			/* event_write fences are only available on r600+ */
    514 			if (rdev->family >= CHIP_R600) {
    515 				rdev->wb.use_event = true;
    516 			}
    517 		}
    518 	}
    519 	/* always use writeback/events on NI, APUs */
    520 	if (rdev->family >= CHIP_PALM) {
    521 		rdev->wb.enabled = true;
    522 		rdev->wb.use_event = true;
    523 	}
    524 
    525 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
    526 
    527 	return 0;
    528 }
    529 
    530 /**
    531  * radeon_vram_location - try to find VRAM location
     532  * @rdev: radeon device structure holding all necessary information
     533  * @mc: memory controller structure holding memory information
     534  * @base: base address at which to put VRAM
     535  *
     536  * Tries to place VRAM at the base address provided as a parameter
     537  * (which is so far either the PCI aperture address or, for IGPs,
     538  * the TOM base address).
     539  *
     540  * If there is not enough space to fit the non-visible VRAM in the 32-bit
     541  * address space, then we limit the VRAM size to the aperture.
     542  *
     543  * If we are using AGP and the AGP aperture doesn't allow us to have
     544  * room for all the VRAM, then we restrict the VRAM to the PCI aperture
     545  * size and print a warning.
     546  *
     547  * This function never fails; the worst case is limiting VRAM.
     548  *
     549  * Note: GTT start, end, and size should be initialized before calling this
     550  * function on AGP platforms.
     551  *
     552  * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
     553  * this shouldn't be a problem as we are using the PCI aperture as a reference.
     554  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
     555  * not IGP.
     556  *
     557  * Note: we use mc_vram_size because on some boards we need to program the MC
     558  * to cover the whole aperture even if the VRAM size is smaller than the
     559  * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
     560  *
     561  * Note: when limiting VRAM it's safe to overwrite real_vram_size because
     562  * we are not in the case where real_vram_size is smaller than mc_vram_size
     563  * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
     564  * of Ubuntu ones).
     565  *
     566  * Note: the IGP TOM addr should be the same as the aperture addr; we don't
     567  * explicitly check for that, though.
    568  *
    569  * FIXME: when reducing VRAM size align new size on power of 2.
    570  */
    571 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
    572 {
    573 	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
    574 
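         	/* Clamp VRAM twice: first so it fits below the MC address mask,
         	 * then (on AGP) so it does not overlap the already placed GTT range;
         	 * in both cases fall back to the PCI aperture size. */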
    575 	mc->vram_start = base;
    576 	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
    577 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    578 		mc->real_vram_size = mc->aper_size;
    579 		mc->mc_vram_size = mc->aper_size;
    580 	}
    581 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    582 	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
    583 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    584 		mc->real_vram_size = mc->aper_size;
    585 		mc->mc_vram_size = mc->aper_size;
    586 	}
    587 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    588 	if (limit && limit < mc->real_vram_size)
    589 		mc->real_vram_size = limit;
    590 	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
    591 			mc->mc_vram_size >> 20, mc->vram_start,
    592 			mc->vram_end, mc->real_vram_size >> 20);
    593 }
    594 
    595 /**
    596  * radeon_gtt_location - try to find GTT location
     597  * @rdev: radeon device structure holding all necessary information
     598  * @mc: memory controller structure holding memory information
     599  *
     600  * Tries to place GTT before or after VRAM.
     601  *
     602  * If the GTT size is bigger than the space left, then we adjust the GTT size.
     603  * Thus this function never fails.
    604  *
    605  * FIXME: when reducing GTT size align new size on power of 2.
    606  */
    607 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
    608 {
    609 	u64 size_af, size_bf;
    610 
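         	/* size_af/size_bf: address space available after/before the VRAM
         	 * range, each rounded to the GTT base alignment; the GTT goes into
         	 * whichever gap is larger. */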
    611 	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
    612 	size_bf = mc->vram_start & ~mc->gtt_base_align;
    613 	if (size_bf > size_af) {
    614 		if (mc->gtt_size > size_bf) {
    615 			dev_warn(rdev->dev, "limiting GTT\n");
    616 			mc->gtt_size = size_bf;
    617 		}
    618 		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
    619 	} else {
    620 		if (mc->gtt_size > size_af) {
    621 			dev_warn(rdev->dev, "limiting GTT\n");
    622 			mc->gtt_size = size_af;
    623 		}
    624 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
    625 	}
    626 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
    627 	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
    628 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
    629 }
    630 
    631 /*
    632  * GPU helpers function.
    633  */
    634 
    635 /**
     636  * radeon_device_is_virtual - check if we are running in a virtual environment
    637  *
    638  * Check if the asic has been passed through to a VM (all asics).
    639  * Used at driver startup.
    640  * Returns true if virtual or false if not.
    641  */
    642 bool radeon_device_is_virtual(void)
    643 {
    644 #ifdef CONFIG_X86
    645 	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
    646 #else
    647 	return false;
    648 #endif
    649 }
    650 
    651 /**
    652  * radeon_card_posted - check if the hw has already been initialized
    653  *
    654  * @rdev: radeon_device pointer
    655  *
    656  * Check if the asic has been initialized (all asics).
    657  * Used at driver startup.
    658  * Returns true if initialized or false if not.
    659  */
    660 bool radeon_card_posted(struct radeon_device *rdev)
    661 {
    662 	uint32_t reg;
    663 
    664 	/* for pass through, always force asic_init for CI */
    665 	if (rdev->family >= CHIP_BONAIRE &&
    666 	    radeon_device_is_virtual())
    667 		return false;
    668 
    669 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
    670 	if (efi_enabled(EFI_BOOT) &&
    671 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
    672 	    (rdev->family < CHIP_R600))
    673 		return false;
    674 
    675 	if (ASIC_IS_NODCE(rdev))
    676 		goto check_memsize;
    677 
    678 	/* first check CRTCs */
    679 	if (ASIC_IS_DCE4(rdev)) {
    680 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
    681 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
    682 			if (rdev->num_crtc >= 4) {
    683 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
    684 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
    685 			}
    686 			if (rdev->num_crtc >= 6) {
    687 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
    688 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
    689 			}
    690 		if (reg & EVERGREEN_CRTC_MASTER_EN)
    691 			return true;
    692 	} else if (ASIC_IS_AVIVO(rdev)) {
    693 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
    694 		      RREG32(AVIVO_D2CRTC_CONTROL);
    695 		if (reg & AVIVO_CRTC_EN) {
    696 			return true;
    697 		}
    698 	} else {
    699 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
    700 		      RREG32(RADEON_CRTC2_GEN_CNTL);
    701 		if (reg & RADEON_CRTC_EN) {
    702 			return true;
    703 		}
    704 	}
    705 
    706 check_memsize:
    707 	/* then check MEM_SIZE, in case the crtcs are off */
    708 	if (rdev->family >= CHIP_R600)
    709 		reg = RREG32(R600_CONFIG_MEMSIZE);
    710 	else
    711 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
    712 
    713 	if (reg)
    714 		return true;
    715 
    716 	return false;
    717 
    718 }
    719 
    720 /**
    721  * radeon_update_bandwidth_info - update display bandwidth params
    722  *
    723  * @rdev: radeon_device pointer
    724  *
    725  * Used when sclk/mclk are switched or display modes are set.
     726  * The params are used to calculate display watermarks (all asics).
    727  */
    728 void radeon_update_bandwidth_info(struct radeon_device *rdev)
    729 {
    730 	fixed20_12 a;
    731 	u32 sclk = rdev->pm.current_sclk;
    732 	u32 mclk = rdev->pm.current_mclk;
    733 
    734 	/* sclk/mclk in Mhz */
    735 	a.full = dfixed_const(100);
    736 	rdev->pm.sclk.full = dfixed_const(sclk);
    737 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
    738 	rdev->pm.mclk.full = dfixed_const(mclk);
    739 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
    740 
    741 	if (rdev->flags & RADEON_IS_IGP) {
    742 		a.full = dfixed_const(16);
    743 		/* core_bandwidth = sclk(Mhz) * 16 */
    744 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
    745 	}
    746 }
    747 
    748 /**
    749  * radeon_boot_test_post_card - check and possibly initialize the hw
    750  *
    751  * @rdev: radeon_device pointer
    752  *
    753  * Check if the asic is initialized and if not, attempt to initialize
    754  * it (all asics).
    755  * Returns true if initialized or false if not.
    756  */
    757 bool radeon_boot_test_post_card(struct radeon_device *rdev)
    758 {
    759 	if (radeon_card_posted(rdev))
    760 		return true;
    761 
    762 	if (rdev->bios) {
    763 		DRM_INFO("GPU not posted. posting now...\n");
    764 		if (rdev->is_atom_bios)
    765 			atom_asic_init(rdev->mode_info.atom_context);
    766 		else
    767 			radeon_combios_asic_init(rdev->ddev);
    768 		return true;
    769 	} else {
    770 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
    771 		return false;
    772 	}
    773 }
    774 
    775 /**
    776  * radeon_dummy_page_init - init dummy page used by the driver
    777  *
    778  * @rdev: radeon_device pointer
    779  *
    780  * Allocate the dummy page used by the driver (all asics).
    781  * This dummy page is used by the driver as a filler for gart entries
     782  * when pages are taken out of the GART.
     783  * Returns 0 on success, -ENOMEM on failure.
    784  */
    785 int radeon_dummy_page_init(struct radeon_device *rdev)
    786 {
    787 	if (rdev->dummy_page.page)
    788 		return 0;
    789 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
    790 	if (rdev->dummy_page.page == NULL)
    791 		return -ENOMEM;
    792 	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
    793 					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    794 	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
    795 		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
    796 		__free_page(rdev->dummy_page.page);
    797 		rdev->dummy_page.page = NULL;
    798 		return -ENOMEM;
    799 	}
    800 	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
    801 							    RADEON_GART_PAGE_DUMMY);
    802 	return 0;
    803 }
    804 
    805 /**
    806  * radeon_dummy_page_fini - free dummy page used by the driver
    807  *
    808  * @rdev: radeon_device pointer
    809  *
    810  * Frees the dummy page used by the driver (all asics).
    811  */
    812 void radeon_dummy_page_fini(struct radeon_device *rdev)
    813 {
    814 	if (rdev->dummy_page.page == NULL)
    815 		return;
    816 	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
    817 			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    818 	__free_page(rdev->dummy_page.page);
    819 	rdev->dummy_page.page = NULL;
    820 }
    821 
    822 
    823 /* ATOM accessor methods */
    824 /*
    825  * ATOM is an interpreted byte code stored in tables in the vbios.  The
    826  * driver registers callbacks to access registers and the interpreter
     827  * in the driver parses the tables and executes them to program specific
    828  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
    829  * atombios.h, and atom.c
    830  */
    831 
    832 /**
    833  * cail_pll_read - read PLL register
    834  *
    835  * @info: atom card_info pointer
    836  * @reg: PLL register offset
    837  *
    838  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    839  * Returns the value of the PLL register.
    840  */
    841 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
    842 {
    843 	struct radeon_device *rdev = info->dev->dev_private;
    844 	uint32_t r;
    845 
    846 	r = rdev->pll_rreg(rdev, reg);
    847 	return r;
    848 }
    849 
    850 /**
    851  * cail_pll_write - write PLL register
    852  *
    853  * @info: atom card_info pointer
    854  * @reg: PLL register offset
    855  * @val: value to write to the pll register
    856  *
    857  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    858  */
    859 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
    860 {
    861 	struct radeon_device *rdev = info->dev->dev_private;
    862 
    863 	rdev->pll_wreg(rdev, reg, val);
    864 }
    865 
    866 /**
    867  * cail_mc_read - read MC (Memory Controller) register
    868  *
    869  * @info: atom card_info pointer
    870  * @reg: MC register offset
    871  *
    872  * Provides an MC register accessor for the atom interpreter (r4xx+).
    873  * Returns the value of the MC register.
    874  */
    875 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
    876 {
    877 	struct radeon_device *rdev = info->dev->dev_private;
    878 	uint32_t r;
    879 
    880 	r = rdev->mc_rreg(rdev, reg);
    881 	return r;
    882 }
    883 
    884 /**
    885  * cail_mc_write - write MC (Memory Controller) register
    886  *
    887  * @info: atom card_info pointer
    888  * @reg: MC register offset
     889  * @val: value to write to the MC register
     890  *
     891  * Provides an MC register accessor for the atom interpreter (r4xx+).
    892  */
    893 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
    894 {
    895 	struct radeon_device *rdev = info->dev->dev_private;
    896 
    897 	rdev->mc_wreg(rdev, reg, val);
    898 }
    899 
    900 /**
    901  * cail_reg_write - write MMIO register
    902  *
    903  * @info: atom card_info pointer
    904  * @reg: MMIO register offset
     905  * @val: value to write to the MMIO register
     906  *
     907  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
    908  */
    909 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
    910 {
    911 	struct radeon_device *rdev = info->dev->dev_private;
    912 
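         	/* ATOM register offsets are dword indices; scale by 4 to get the
         	 * byte offset the MMIO accessor expects. */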
    913 	WREG32(reg*4, val);
    914 }
    915 
    916 /**
    917  * cail_reg_read - read MMIO register
    918  *
    919  * @info: atom card_info pointer
    920  * @reg: MMIO register offset
    921  *
    922  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
    923  * Returns the value of the MMIO register.
    924  */
    925 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
    926 {
    927 	struct radeon_device *rdev = info->dev->dev_private;
    928 	uint32_t r;
    929 
    930 	r = RREG32(reg*4);
    931 	return r;
    932 }
    933 
    934 /**
    935  * cail_ioreg_write - write IO register
    936  *
    937  * @info: atom card_info pointer
    938  * @reg: IO register offset
     939  * @val: value to write to the IO register
     940  *
     941  * Provides an IO register accessor for the atom interpreter (r4xx+).
    942  */
    943 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
    944 {
    945 	struct radeon_device *rdev = info->dev->dev_private;
    946 
    947 	WREG32_IO(reg*4, val);
    948 }
    949 
    950 /**
    951  * cail_ioreg_read - read IO register
    952  *
    953  * @info: atom card_info pointer
    954  * @reg: IO register offset
    955  *
    956  * Provides an IO register accessor for the atom interpreter (r4xx+).
    957  * Returns the value of the IO register.
    958  */
    959 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
    960 {
    961 	struct radeon_device *rdev = info->dev->dev_private;
    962 	uint32_t r;
    963 
    964 	r = RREG32_IO(reg*4);
    965 	return r;
    966 }
    967 
    968 /**
    969  * radeon_atombios_init - init the driver info and callbacks for atombios
    970  *
    971  * @rdev: radeon_device pointer
    972  *
    973  * Initializes the driver info and register access callbacks for the
    974  * ATOM interpreter (r4xx+).
     975  * Returns 0 on success, -ENOMEM on failure.
    976  * Called at driver startup.
    977  */
    978 int radeon_atombios_init(struct radeon_device *rdev)
    979 {
    980 	struct card_info *atom_card_info =
    981 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
    982 
    983 	if (!atom_card_info)
    984 		return -ENOMEM;
    985 
    986 	rdev->mode_info.atom_card_info = atom_card_info;
    987 	atom_card_info->dev = rdev->ddev;
    988 	atom_card_info->reg_read = cail_reg_read;
    989 	atom_card_info->reg_write = cail_reg_write;
    990 	/* needed for iio ops */
    991 	if (rdev->rio_mem) {
    992 		atom_card_info->ioreg_read = cail_ioreg_read;
    993 		atom_card_info->ioreg_write = cail_ioreg_write;
    994 	} else {
    995 		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
    996 		atom_card_info->ioreg_read = cail_reg_read;
    997 		atom_card_info->ioreg_write = cail_reg_write;
    998 	}
    999 	atom_card_info->mc_read = cail_mc_read;
   1000 	atom_card_info->mc_write = cail_mc_write;
   1001 	atom_card_info->pll_read = cail_pll_read;
   1002 	atom_card_info->pll_write = cail_pll_write;
   1003 
   1004 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
   1005 	if (!rdev->mode_info.atom_context) {
   1006 		radeon_atombios_fini(rdev);
   1007 		return -ENOMEM;
   1008 	}
   1009 
   1010 	mutex_init(&rdev->mode_info.atom_context->mutex);
   1011 	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
   1012 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
   1013 	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
   1014 	return 0;
   1015 }
   1016 
   1017 /**
   1018  * radeon_atombios_fini - free the driver info and callbacks for atombios
   1019  *
   1020  * @rdev: radeon_device pointer
   1021  *
   1022  * Frees the driver info and register access callbacks for the ATOM
   1023  * interpreter (r4xx+).
   1024  * Called at driver shutdown.
   1025  */
   1026 void radeon_atombios_fini(struct radeon_device *rdev)
   1027 {
   1028 	if (rdev->mode_info.atom_context) {
   1029 		kfree(rdev->mode_info.atom_context->scratch);
   1030 	}
   1031 	kfree(rdev->mode_info.atom_context);
   1032 	rdev->mode_info.atom_context = NULL;
   1033 	kfree(rdev->mode_info.atom_card_info);
   1034 	rdev->mode_info.atom_card_info = NULL;
   1035 }
   1036 
   1037 /* COMBIOS */
   1038 /*
   1039  * COMBIOS is the bios format prior to ATOM. It provides
   1040  * command tables similar to ATOM, but doesn't have a unified
   1041  * parser.  See radeon_combios.c
   1042  */
   1043 
   1044 /**
   1045  * radeon_combios_init - init the driver info for combios
   1046  *
   1047  * @rdev: radeon_device pointer
   1048  *
   1049  * Initializes the driver info for combios (r1xx-r3xx).
    1050  * Returns 0 on success.
   1051  * Called at driver startup.
   1052  */
   1053 int radeon_combios_init(struct radeon_device *rdev)
   1054 {
   1055 	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
   1056 	return 0;
   1057 }
   1058 
   1059 /**
   1060  * radeon_combios_fini - free the driver info for combios
   1061  *
   1062  * @rdev: radeon_device pointer
   1063  *
   1064  * Frees the driver info for combios (r1xx-r3xx).
   1065  * Called at driver shutdown.
   1066  */
   1067 void radeon_combios_fini(struct radeon_device *rdev)
   1068 {
   1069 }
   1070 
   1071 /* if we get transitioned to only one device, take VGA back */
   1072 /**
   1073  * radeon_vga_set_decode - enable/disable vga decode
   1074  *
   1075  * @cookie: radeon_device pointer
   1076  * @state: enable/disable vga decode
   1077  *
   1078  * Enable/disable vga decode (all asics).
   1079  * Returns VGA resource flags.
   1080  */
   1081 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
   1082 {
   1083 	struct radeon_device *rdev = cookie;
   1084 	radeon_vga_set_state(rdev, state);
   1085 	if (state)
   1086 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
   1087 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1088 	else
   1089 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1090 }
   1091 
   1092 /**
   1093  * radeon_check_pot_argument - check that argument is a power of two
   1094  *
   1095  * @arg: value to check
   1096  *
   1097  * Validates that a certain argument is a power of two (all asics).
   1098  * Returns true if argument is valid.
   1099  */
   1100 static bool radeon_check_pot_argument(int arg)
   1101 {
   1102 	return (arg & (arg - 1)) == 0;
   1103 }
   1104 
   1105 /**
   1106  * Determine a sensible default GART size according to ASIC family.
   1107  *
    1108  * @family: ASIC family name
   1109  */
   1110 static int radeon_gart_size_auto(enum radeon_family family)
   1111 {
   1112 	/* default to a larger gart size on newer asics */
   1113 	if (family >= CHIP_TAHITI)
   1114 		return 2048;
   1115 	else if (family >= CHIP_RV770)
   1116 		return 1024;
   1117 	else
   1118 		return 512;
   1119 }
   1120 
   1121 /**
   1122  * radeon_check_arguments - validate module params
   1123  *
   1124  * @rdev: radeon_device pointer
   1125  *
   1126  * Validates certain module parameters and updates
   1127  * the associated values used by the driver (all asics).
   1128  */
   1129 static void radeon_check_arguments(struct radeon_device *rdev)
   1130 {
   1131 	/* vramlimit must be a power of two */
   1132 	if (!radeon_check_pot_argument(radeon_vram_limit)) {
   1133 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
   1134 				radeon_vram_limit);
   1135 		radeon_vram_limit = 0;
   1136 	}
   1137 
   1138 	if (radeon_gart_size == -1) {
   1139 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1140 	}
    1141 	/* gtt size must be a power of two and greater than or equal to 32M */
   1142 	if (radeon_gart_size < 32) {
   1143 		dev_warn(rdev->dev, "gart size (%d) too small\n",
   1144 				radeon_gart_size);
   1145 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1146 	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
   1147 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
   1148 				radeon_gart_size);
   1149 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1150 	}
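         	/* radeon_gart_size is specified in MB; convert to bytes. */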
   1151 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
   1152 
    1153 	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
   1154 	switch (radeon_agpmode) {
   1155 	case -1:
   1156 	case 0:
   1157 	case 1:
   1158 	case 2:
   1159 	case 4:
   1160 	case 8:
   1161 		break;
   1162 	default:
   1163 		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
   1164 				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
   1165 		radeon_agpmode = 0;
   1166 		break;
   1167 	}
   1168 
   1169 	if (!radeon_check_pot_argument(radeon_vm_size)) {
   1170 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
   1171 			 radeon_vm_size);
   1172 		radeon_vm_size = 4;
   1173 	}
   1174 
   1175 	if (radeon_vm_size < 1) {
   1176 		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
   1177 			 radeon_vm_size);
   1178 		radeon_vm_size = 4;
   1179 	}
   1180 
   1181 	/*
   1182 	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
   1183 	 */
   1184 	if (radeon_vm_size > 1024) {
   1185 		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
   1186 			 radeon_vm_size);
   1187 		radeon_vm_size = 4;
   1188 	}
   1189 
   1190 	/* defines number of bits in page table versus page directory,
   1191 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
   1192 	 * page table and the remaining bits are in the page directory */
   1193 	if (radeon_vm_block_size == -1) {
   1194 
   1195 		/* Total bits covered by PD + PTs */
   1196 		unsigned bits = ilog2(radeon_vm_size) + 18;
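         		/*
         		 * radeon_vm_size is in GB, so the VM spans ilog2(size) + 30
         		 * address bits; subtracting the 12-bit page offset leaves
         		 * ilog2(size) + 18 bits to split between the page directory
         		 * and the page tables (e.g. 8 GB -> 21 bits -> a 9-bit PD
         		 * over 12-bit PTs).
         		 */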
   1197 
   1198 		/* Make sure the PD is 4K in size up to 8GB address space.
    1199 		   Above that, split equally between PD and PTs. */
   1200 		if (radeon_vm_size <= 8)
   1201 			radeon_vm_block_size = bits - 9;
   1202 		else
   1203 			radeon_vm_block_size = (bits + 3) / 2;
   1204 
   1205 	} else if (radeon_vm_block_size < 9) {
   1206 		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
   1207 			 radeon_vm_block_size);
   1208 		radeon_vm_block_size = 9;
   1209 	}
   1210 
   1211 	if (radeon_vm_block_size > 24 ||
   1212 	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
   1213 		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
   1214 			 radeon_vm_block_size);
   1215 		radeon_vm_block_size = 9;
   1216 	}
   1217 }
   1218 
   1219 /**
   1220  * radeon_switcheroo_set_state - set switcheroo state
   1221  *
   1222  * @pdev: pci dev pointer
   1223  * @state: vga_switcheroo state
   1224  *
   1225  * Callback for the switcheroo driver.  Suspends or resumes the
   1226  * the asics before or after it is powered up using ACPI methods.
   1227  */
   1228 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
   1229 {
   1230 	struct drm_device *dev = pci_get_drvdata(pdev);
   1231 
   1232 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
   1233 		return;
   1234 
   1235 	if (state == VGA_SWITCHEROO_ON) {
   1236 		pr_info("radeon: switched on\n");
   1237 		/* don't suspend or resume card normally */
   1238 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1239 
   1240 		radeon_resume_kms(dev, true, true);
   1241 
   1242 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
   1243 		drm_kms_helper_poll_enable(dev);
   1244 	} else {
   1245 		pr_info("radeon: switched off\n");
   1246 		drm_kms_helper_poll_disable(dev);
   1247 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1248 		radeon_suspend_kms(dev, true, true, false);
   1249 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
   1250 	}
   1251 }
   1252 
   1253 /**
   1254  * radeon_switcheroo_can_switch - see if switcheroo state can change
   1255  *
   1256  * @pdev: pci dev pointer
   1257  *
    1258  * Callback for the switcheroo driver.  Checks if the switcheroo
   1259  * state can be changed.
   1260  * Returns true if the state can be changed, false if not.
   1261  */
   1262 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
   1263 {
   1264 	struct drm_device *dev = pci_get_drvdata(pdev);
   1265 
   1266 	/*
   1267 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
   1268 	 * locking inversion with the driver load path. And the access here is
   1269 	 * completely racy anyway. So don't bother with locking for now.
   1270 	 */
   1271 	return dev->open_count == 0;
   1272 }
   1273 
   1274 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
   1275 	.set_gpu_state = radeon_switcheroo_set_state,
   1276 	.reprobe = NULL,
   1277 	.can_switch = radeon_switcheroo_can_switch,
   1278 };
   1279 
   1280 /**
   1281  * radeon_device_init - initialize the driver
   1282  *
   1283  * @rdev: radeon_device pointer
    1284  * @ddev: drm dev pointer
   1285  * @pdev: pci dev pointer
   1286  * @flags: driver flags
   1287  *
   1288  * Initializes the driver info and hw (all asics).
   1289  * Returns 0 for success or an error on failure.
   1290  * Called at driver startup.
   1291  */
   1292 int radeon_device_init(struct radeon_device *rdev,
   1293 		       struct drm_device *ddev,
   1294 		       struct pci_dev *pdev,
   1295 		       uint32_t flags)
   1296 {
   1297 	int r, i;
   1298 	int dma_bits;
   1299 	bool runtime = false;
   1300 
   1301 	rdev->shutdown = false;
   1302 	rdev->dev = &pdev->dev;
   1303 	rdev->ddev = ddev;
   1304 	rdev->pdev = pdev;
   1305 	rdev->flags = flags;
   1306 	rdev->family = flags & RADEON_FAMILY_MASK;
   1307 	rdev->is_atom_bios = false;
   1308 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
   1309 	rdev->mc.gtt_size = 512 * 1024 * 1024;
   1310 	rdev->accel_working = false;
   1311 	/* set up ring ids */
   1312 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1313 		rdev->ring[i].idx = i;
   1314 	}
   1315 	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
   1316 
   1317 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
   1318 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
   1319 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
   1320 
    1321 	/* mutex initializations are all done here so we
    1322 	 * can call these functions again without locking issues */
   1323 	mutex_init(&rdev->ring_lock);
   1324 	mutex_init(&rdev->dc_hw_i2c_mutex);
   1325 	atomic_set(&rdev->ih.lock, 0);
   1326 	mutex_init(&rdev->gem.mutex);
   1327 	mutex_init(&rdev->pm.mutex);
   1328 	mutex_init(&rdev->gpu_clock_mutex);
   1329 	mutex_init(&rdev->srbm_mutex);
   1330 	init_rwsem(&rdev->pm.mclk_lock);
   1331 	init_rwsem(&rdev->exclusive_lock);
   1332 	init_waitqueue_head(&rdev->irq.vblank_queue);
   1333 	r = radeon_gem_init(rdev);
   1334 	if (r)
   1335 		return r;
   1336 
   1337 	radeon_check_arguments(rdev);
   1338 	/* Adjust VM size here.
   1339 	 * Max GPUVM size for cayman+ is 40 bits.
   1340 	 */
   1341 	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
   1342 
   1343 	/* Set asic functions */
   1344 	r = radeon_asic_init(rdev);
   1345 	if (r)
   1346 		return r;
   1347 
    1348 	/* all of the newer IGP chips have an internal gart.
    1349 	 * However, some rs4xx report as AGP, so remove that here.
   1350 	 */
   1351 	if ((rdev->family >= CHIP_RS400) &&
   1352 	    (rdev->flags & RADEON_IS_IGP)) {
   1353 		rdev->flags &= ~RADEON_IS_AGP;
   1354 	}
   1355 
   1356 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
   1357 		radeon_agp_disable(rdev);
   1358 	}
   1359 
   1360 	/* Set the internal MC address mask
   1361 	 * This is the max address of the GPU's
   1362 	 * internal address space.
   1363 	 */
   1364 	if (rdev->family >= CHIP_CAYMAN)
   1365 		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
   1366 	else if (rdev->family >= CHIP_CEDAR)
   1367 		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
   1368 	else
   1369 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
   1370 
   1371 	/* set DMA mask.
   1372 	 * PCIE - can handle 40-bits.
   1373 	 * IGP - can handle 40-bits
   1374 	 * AGP - generally dma32 is safest
   1375 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
   1376 	 */
   1377 	dma_bits = 40;
   1378 	if (rdev->flags & RADEON_IS_AGP)
   1379 		dma_bits = 32;
   1380 	if ((rdev->flags & RADEON_IS_PCI) &&
   1381 	    (rdev->family <= CHIP_RS740))
   1382 		dma_bits = 32;
   1383 #ifdef CONFIG_PPC64
   1384 	if (rdev->family == CHIP_CEDAR)
   1385 		dma_bits = 32;
   1386 #endif
   1387 
   1388 	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
   1389 	if (r) {
   1390 		pr_warn("radeon: No suitable DMA available\n");
   1391 		return r;
   1392 	}
   1393 	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
   1394 
   1395 	/* Registers mapping */
   1396 	/* TODO: block userspace mapping of io register */
   1397 	spin_lock_init(&rdev->mmio_idx_lock);
   1398 	spin_lock_init(&rdev->smc_idx_lock);
   1399 	spin_lock_init(&rdev->pll_idx_lock);
   1400 	spin_lock_init(&rdev->mc_idx_lock);
   1401 	spin_lock_init(&rdev->pcie_idx_lock);
   1402 	spin_lock_init(&rdev->pciep_idx_lock);
   1403 	spin_lock_init(&rdev->pif_idx_lock);
   1404 	spin_lock_init(&rdev->cg_idx_lock);
   1405 	spin_lock_init(&rdev->uvd_idx_lock);
   1406 	spin_lock_init(&rdev->rcu_idx_lock);
   1407 	spin_lock_init(&rdev->didt_idx_lock);
   1408 	spin_lock_init(&rdev->end_idx_lock);
   1409 	if (rdev->family >= CHIP_BONAIRE) {
   1410 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
   1411 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
   1412 	} else {
   1413 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
   1414 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
   1415 	}
   1416 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
   1417 	if (rdev->rmmio == NULL)
   1418 		return -ENOMEM;
   1419 
   1420 	/* doorbell bar mapping */
   1421 	if (rdev->family >= CHIP_BONAIRE)
   1422 		radeon_doorbell_init(rdev);
   1423 
   1424 	/* io port mapping */
   1425 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
   1426 		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
   1427 			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
   1428 			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
   1429 			break;
   1430 		}
   1431 	}
   1432 	if (rdev->rio_mem == NULL)
   1433 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1434 
   1435 	if (rdev->flags & RADEON_IS_PX)
   1436 		radeon_device_handle_px_quirks(rdev);
   1437 
   1438 	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
   1439 	/* this will fail for cards that aren't VGA class devices, just
   1440 	 * ignore it */
   1441 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
   1442 
   1443 	if (rdev->flags & RADEON_IS_PX)
   1444 		runtime = true;
   1445 	if (!pci_is_thunderbolt_attached(rdev->pdev))
   1446 		vga_switcheroo_register_client(rdev->pdev,
   1447 					       &radeon_switcheroo_ops, runtime);
   1448 	if (runtime)
   1449 		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
   1450 
   1451 	r = radeon_init(rdev);
   1452 	if (r)
   1453 		goto failed;
   1454 
   1455 	r = radeon_gem_debugfs_init(rdev);
   1456 	if (r) {
   1457 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
   1458 	}
   1459 
   1460 	r = radeon_mst_debugfs_init(rdev);
   1461 	if (r) {
   1462 		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
   1463 	}
   1464 
   1465 	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
    1466 		/* Acceleration not working on AGP card; try again
   1467 		 * with fallback to PCI or PCIE GART
   1468 		 */
   1469 		radeon_asic_reset(rdev);
   1470 		radeon_fini(rdev);
   1471 		radeon_agp_disable(rdev);
   1472 		r = radeon_init(rdev);
   1473 		if (r)
   1474 			goto failed;
   1475 	}
   1476 
   1477 	r = radeon_ib_ring_tests(rdev);
   1478 	if (r)
   1479 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1480 
   1481 	/*
    1482 	 * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
    1483 	 * after the CP ring has chewed at least one packet. Hence here we stop
   1484 	 * and restart DPM after the radeon_ib_ring_tests().
   1485 	 */
   1486 	if (rdev->pm.dpm_enabled &&
   1487 	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
   1488 	    (rdev->family == CHIP_TURKS) &&
   1489 	    (rdev->flags & RADEON_IS_MOBILITY)) {
   1490 		mutex_lock(&rdev->pm.mutex);
   1491 		radeon_dpm_disable(rdev);
   1492 		radeon_dpm_enable(rdev);
   1493 		mutex_unlock(&rdev->pm.mutex);
   1494 	}
   1495 
   1496 	if ((radeon_testing & 1)) {
   1497 		if (rdev->accel_working)
   1498 			radeon_test_moves(rdev);
   1499 		else
   1500 			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
   1501 	}
   1502 	if ((radeon_testing & 2)) {
   1503 		if (rdev->accel_working)
   1504 			radeon_test_syncing(rdev);
   1505 		else
   1506 			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
   1507 	}
   1508 	if (radeon_benchmarking) {
   1509 		if (rdev->accel_working)
   1510 			radeon_benchmark(rdev, radeon_benchmarking);
   1511 		else
   1512 			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
   1513 	}
   1514 	return 0;
   1515 
   1516 failed:
   1517 	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
   1518 	if (radeon_is_px(ddev))
   1519 		pm_runtime_put_noidle(ddev->dev);
   1520 	if (runtime)
   1521 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
   1522 	return r;
   1523 }
   1524 
   1525 /**
   1526  * radeon_device_fini - tear down the driver
   1527  *
   1528  * @rdev: radeon_device pointer
   1529  *
   1530  * Tear down the driver info (all asics).
   1531  * Called at driver shutdown.
   1532  */
   1533 void radeon_device_fini(struct radeon_device *rdev)
   1534 {
   1535 	DRM_INFO("radeon: finishing device.\n");
   1536 	rdev->shutdown = true;
   1537 	/* evict vram memory */
   1538 	radeon_bo_evict_vram(rdev);
   1539 	radeon_fini(rdev);
   1540 	if (!pci_is_thunderbolt_attached(rdev->pdev))
   1541 		vga_switcheroo_unregister_client(rdev->pdev);
   1542 	if (rdev->flags & RADEON_IS_PX)
   1543 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
   1544 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
   1545 	if (rdev->rio_mem)
   1546 		pci_iounmap(rdev->pdev, rdev->rio_mem);
   1547 	rdev->rio_mem = NULL;
   1548 	iounmap(rdev->rmmio);
   1549 	rdev->rmmio = NULL;
   1550 	if (rdev->family >= CHIP_BONAIRE)
   1551 		radeon_doorbell_fini(rdev);
   1552 }
   1553 
   1554 
   1555 /*
   1556  * Suspend & resume.
   1557  */
   1558 /**
   1559  * radeon_suspend_kms - initiate device suspend
   1560  *
    1561  * @dev: drm dev pointer
   1562  * @state: suspend state
   1563  *
   1564  * Puts the hw in the suspend state (all asics).
   1565  * Returns 0 for success or an error on failure.
   1566  * Called at driver suspend.
   1567  */
   1568 int radeon_suspend_kms(struct drm_device *dev, bool suspend,
   1569 		       bool fbcon, bool freeze)
   1570 {
   1571 	struct radeon_device *rdev;
   1572 	struct drm_crtc *crtc;
   1573 	struct drm_connector *connector;
   1574 	int i, r;
   1575 
   1576 	if (dev == NULL || dev->dev_private == NULL) {
   1577 		return -ENODEV;
   1578 	}
   1579 
   1580 	rdev = dev->dev_private;
   1581 
   1582 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1583 		return 0;
   1584 
   1585 	drm_kms_helper_poll_disable(dev);
   1586 
   1587 	drm_modeset_lock_all(dev);
   1588 	/* turn off display hw */
   1589 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1590 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
   1591 	}
   1592 	drm_modeset_unlock_all(dev);
   1593 
   1594 	/* unpin the front buffers and cursors */
   1595 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1596 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1597 		struct drm_framebuffer *fb = crtc->primary->fb;
   1598 		struct radeon_bo *robj;
   1599 
   1600 		if (radeon_crtc->cursor_bo) {
   1601 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1602 			r = radeon_bo_reserve(robj, false);
   1603 			if (r == 0) {
   1604 				radeon_bo_unpin(robj);
   1605 				radeon_bo_unreserve(robj);
   1606 			}
   1607 		}
   1608 
   1609 		if (fb == NULL || fb->obj[0] == NULL) {
   1610 			continue;
   1611 		}
   1612 		robj = gem_to_radeon_bo(fb->obj[0]);
   1613 		/* don't unpin kernel fb objects */
   1614 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
   1615 			r = radeon_bo_reserve(robj, false);
   1616 			if (r == 0) {
   1617 				radeon_bo_unpin(robj);
   1618 				radeon_bo_unreserve(robj);
   1619 			}
   1620 		}
   1621 	}
   1622 	/* evict vram memory */
   1623 	radeon_bo_evict_vram(rdev);
   1624 
   1625 	/* wait for gpu to finish processing current batch */
   1626 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1627 		r = radeon_fence_wait_empty(rdev, i);
   1628 		if (r) {
   1629 			/* delay GPU reset to resume */
   1630 			radeon_fence_driver_force_completion(rdev, i);
   1631 		}
   1632 	}
   1633 
   1634 	radeon_save_bios_scratch_regs(rdev);
   1635 
   1636 	radeon_suspend(rdev);
   1637 	radeon_hpd_fini(rdev);
   1638 	/* evict remaining vram memory
   1639 	 * This second call to evict vram is to evict the gart page table
   1640 	 * using the CPU.
   1641 	 */
   1642 	radeon_bo_evict_vram(rdev);
   1643 
   1644 	radeon_agp_suspend(rdev);
   1645 
   1646 	pci_save_state(dev->pdev);
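         	/*
         	 * On a freeze (as opposed to a regular suspend), reset CEDAR and
         	 * newer dGPUs instead of powering them down, presumably to leave
         	 * the hw in a known state for the caller; a regular suspend
         	 * disables the PCI device and drops it to D3hot.
         	 */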
   1647 	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
   1648 		rdev->asic->asic_reset(rdev, true);
   1649 		pci_restore_state(dev->pdev);
   1650 	} else if (suspend) {
   1651 		/* Shut down the device */
   1652 		pci_disable_device(dev->pdev);
   1653 		pci_set_power_state(dev->pdev, PCI_D3hot);
   1654 	}
   1655 
   1656 	if (fbcon) {
   1657 		console_lock();
   1658 		radeon_fbdev_set_suspend(rdev, 1);
   1659 		console_unlock();
   1660 	}
   1661 	return 0;
   1662 }
   1663 
   1664 /**
   1665  * radeon_resume_kms - initiate device resume
   1666  *
    1667  * @dev: drm dev pointer
          * @resume: true to bring the PCI device back to D0 and re-enable it
          * @fbcon: true to also resume the fbdev console
   1668  *
   1669  * Bring the hw back to operating state (all asics).
   1670  * Returns 0 for success or an error on failure.
   1671  * Called at driver resume.
   1672  */
   1673 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
   1674 {
   1675 	struct drm_connector *connector;
   1676 	struct radeon_device *rdev = dev->dev_private;
   1677 	struct drm_crtc *crtc;
   1678 	int r;
   1679 
   1680 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1681 		return 0;
   1682 
   1683 	if (fbcon) {
   1684 		console_lock();
   1685 	}
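         	/* on a full resume, bring the PCI device back to D0, restore its
         	 * saved config space and re-enable it before any register access */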
   1686 	if (resume) {
   1687 		pci_set_power_state(dev->pdev, PCI_D0);
   1688 		pci_restore_state(dev->pdev);
   1689 		if (pci_enable_device(dev->pdev)) {
   1690 			if (fbcon)
   1691 				console_unlock();
   1692 			return -1;
   1693 		}
   1694 	}
   1695 	/* resume AGP if in use */
   1696 	radeon_agp_resume(rdev);
   1697 	radeon_resume(rdev);
   1698 
   1699 	r = radeon_ib_ring_tests(rdev);
   1700 	if (r)
   1701 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1702 
   1703 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   1704 		/* do dpm late init */
   1705 		r = radeon_pm_late_init(rdev);
   1706 		if (r) {
   1707 			rdev->pm.dpm_enabled = false;
   1708 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1709 		}
   1710 	} else {
   1711 		/* resume old pm late */
   1712 		radeon_pm_resume(rdev);
   1713 	}
   1714 
   1715 	radeon_restore_bios_scratch_regs(rdev);
   1716 
   1717 	/* pin cursors */
   1718 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1719 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1720 
   1721 		if (radeon_crtc->cursor_bo) {
   1722 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1723 			r = radeon_bo_reserve(robj, false);
   1724 			if (r == 0) {
   1725 				/* Only 27 bit offset for legacy cursor */
   1726 				r = radeon_bo_pin_restricted(robj,
   1727 							     RADEON_GEM_DOMAIN_VRAM,
   1728 							     ASIC_IS_AVIVO(rdev) ?
   1729 							     0 : 1 << 27,
   1730 							     &radeon_crtc->cursor_addr);
   1731 				if (r != 0)
   1732 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
   1733 				radeon_bo_unreserve(robj);
   1734 			}
   1735 		}
   1736 	}
   1737 
   1738 	/* init dig PHYs, disp eng pll */
   1739 	if (rdev->is_atom_bios) {
   1740 		radeon_atom_encoder_init(rdev);
   1741 		radeon_atom_disp_eng_pll_init(rdev);
   1742 		/* turn on the BL */
   1743 		if (rdev->mode_info.bl_encoder) {
   1744 			u8 bl_level = radeon_get_backlight_level(rdev,
   1745 								 rdev->mode_info.bl_encoder);
   1746 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   1747 						   bl_level);
   1748 		}
   1749 	}
   1750 	/* reset hpd state */
   1751 	radeon_hpd_init(rdev);
   1752 	/* blat the mode back in */
   1753 	if (fbcon) {
   1754 		drm_helper_resume_force_mode(dev);
   1755 		/* turn on display hw */
   1756 		drm_modeset_lock_all(dev);
   1757 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1758 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
   1759 		}
   1760 		drm_modeset_unlock_all(dev);
   1761 	}
   1762 
   1763 	drm_kms_helper_poll_enable(dev);
   1764 
   1765 	/* set the power state here in case we are a PX system or headless */
   1766 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   1767 		radeon_pm_compute_clocks(rdev);
   1768 
   1769 	if (fbcon) {
   1770 		radeon_fbdev_set_suspend(rdev, 0);
   1771 		console_unlock();
   1772 	}
   1773 
   1774 	return 0;
   1775 }
   1776 
   1777 /**
   1778  * radeon_gpu_reset - reset the asic
   1779  *
   1780  * @rdev: radeon device pointer
   1781  *
    1782  * Attempt to reset the GPU if it has hung (all asics).
   1783  * Returns 0 for success or an error on failure.
   1784  */
   1785 int radeon_gpu_reset(struct radeon_device *rdev)
   1786 {
   1787 	unsigned ring_sizes[RADEON_NUM_RINGS];
   1788 	uint32_t *ring_data[RADEON_NUM_RINGS];
    1790 	bool saved = false;
   1792 	int i, r;
   1793 	int resched;
   1794 
   1795 	down_write(&rdev->exclusive_lock);
   1796 
   1797 	if (!rdev->needs_reset) {
   1798 		up_write(&rdev->exclusive_lock);
   1799 		return 0;
   1800 	}
   1801 
   1802 	atomic_inc(&rdev->gpu_reset_counter);
   1803 
   1804 	radeon_save_bios_scratch_regs(rdev);
   1805 	/* block TTM */
   1806 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
   1807 	radeon_suspend(rdev);
   1808 	radeon_hpd_fini(rdev);
   1809 
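         	/* back up any unprocessed commands on each ring so they can be
         	 * replayed if the reset succeeds */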
   1810 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1811 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
   1812 						   &ring_data[i]);
   1813 		if (ring_sizes[i]) {
   1814 			saved = true;
   1815 			dev_info(rdev->dev, "Saved %d dwords of commands "
   1816 				 "on ring %d.\n", ring_sizes[i], i);
   1817 		}
   1818 	}
   1819 
   1820 	r = radeon_asic_reset(rdev);
   1821 	if (!r) {
   1822 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
   1823 		radeon_resume(rdev);
   1824 	}
   1825 
   1826 	radeon_restore_bios_scratch_regs(rdev);
   1827 
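         	/* on success, replay the saved commands; otherwise force fence
         	 * completion so waiters are released, and free the backups */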
   1828 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1829 		if (!r && ring_data[i]) {
   1830 			radeon_ring_restore(rdev, &rdev->ring[i],
   1831 					    ring_sizes[i], ring_data[i]);
   1832 		} else {
   1833 			radeon_fence_driver_force_completion(rdev, i);
   1834 			kfree(ring_data[i]);
   1835 		}
   1836 	}
   1837 
   1838 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   1839 		/* do dpm late init */
   1840 		r = radeon_pm_late_init(rdev);
   1841 		if (r) {
   1842 			rdev->pm.dpm_enabled = false;
   1843 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1844 		}
   1845 	} else {
   1846 		/* resume old pm late */
   1847 		radeon_pm_resume(rdev);
   1848 	}
   1849 
   1850 	/* init dig PHYs, disp eng pll */
   1851 	if (rdev->is_atom_bios) {
   1852 		radeon_atom_encoder_init(rdev);
   1853 		radeon_atom_disp_eng_pll_init(rdev);
   1854 		/* turn on the BL */
   1855 		if (rdev->mode_info.bl_encoder) {
   1856 			u8 bl_level = radeon_get_backlight_level(rdev,
   1857 								 rdev->mode_info.bl_encoder);
   1858 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   1859 						   bl_level);
   1860 		}
   1861 	}
   1862 	/* reset hpd state */
   1863 	radeon_hpd_init(rdev);
   1864 
   1865 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
   1866 
   1867 	rdev->in_reset = true;
   1868 	rdev->needs_reset = false;
   1869 
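         	/* downgrade the exclusive lock to a read lock so other readers of
         	 * exclusive_lock can run while the mode is restored and the IB
         	 * tests run; dropped with up_read() at the end of this function */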
   1870 	downgrade_write(&rdev->exclusive_lock);
   1871 
   1872 	drm_helper_resume_force_mode(rdev->ddev);
   1873 
   1874 	/* set the power state here in case we are a PX system or headless */
   1875 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   1876 		radeon_pm_compute_clocks(rdev);
   1877 
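         	/* if commands were saved but the IB tests fail, return -EAGAIN so
         	 * needs_reset is set again below and the reset can be retried */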
   1878 	if (!r) {
   1879 		r = radeon_ib_ring_tests(rdev);
   1880 		if (r && saved)
   1881 			r = -EAGAIN;
   1882 	} else {
    1883 		/* bad news: how do we tell userspace? */
   1884 		dev_info(rdev->dev, "GPU reset failed\n");
   1885 	}
   1886 
   1887 	rdev->needs_reset = r == -EAGAIN;
   1888 	rdev->in_reset = false;
   1889 
   1890 	up_read(&rdev->exclusive_lock);
   1891 	return r;
   1892 }
   1893 
   1894 
   1895 /*
   1896  * Debugfs
   1897  */
   1898 int radeon_debugfs_add_files(struct radeon_device *rdev,
   1899 			     struct drm_info_list *files,
   1900 			     unsigned nfiles)
   1901 {
   1902 	unsigned i;
   1903 
   1904 	for (i = 0; i < rdev->debugfs_count; i++) {
   1905 		if (rdev->debugfs[i].files == files) {
   1906 			/* Already registered */
   1907 			return 0;
   1908 		}
   1909 	}
   1910 
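         	/* rdev->debugfs[] is statically sized; refuse to register more
         	 * than RADEON_DEBUGFS_MAX_COMPONENTS entries */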
   1911 	i = rdev->debugfs_count + 1;
   1912 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
   1913 		DRM_ERROR("Reached maximum number of debugfs components.\n");
    1914 		DRM_ERROR("Report this so we can increase "
    1915 			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
   1916 		return -EINVAL;
   1917 	}
   1918 	rdev->debugfs[rdev->debugfs_count].files = files;
   1919 	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
   1920 	rdev->debugfs_count = i;
   1921 #if defined(CONFIG_DEBUG_FS)
   1922 	drm_debugfs_create_files(files, nfiles,
   1923 				 rdev->ddev->primary->debugfs_root,
   1924 				 rdev->ddev->primary);
   1925 #endif
   1926 	return 0;
   1927 }
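
         /*
          * Typical caller pattern (a sketch only; the list and show function
          * named here are hypothetical, real callers live elsewhere in the
          * driver):
          *
          *	static struct drm_info_list radeon_foo_list[] = {
          *		{ "radeon_foo_info", radeon_debugfs_foo_info, 0, NULL },
          *	};
          *
          *	r = radeon_debugfs_add_files(rdev, radeon_foo_list,
          *				     ARRAY_SIZE(radeon_foo_list));
          */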
   1928