Home | History | Annotate | Line # | Download | only in radeon
radeon_device.c revision 1.11
      1 /*	$NetBSD: radeon_device.c,v 1.11 2021/12/18 23:45:43 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2008 Advanced Micro Devices, Inc.
      5  * Copyright 2008 Red Hat Inc.
      6  * Copyright 2009 Jerome Glisse.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24  * OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  * Authors: Dave Airlie
     27  *          Alex Deucher
     28  *          Jerome Glisse
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.11 2021/12/18 23:45:43 riastradh Exp $");
     33 
     34 #include <linux/console.h>
     35 #include <linux/efi.h>
     36 #include <linux/pci.h>
     37 #include <linux/pm_runtime.h>
     38 #include <linux/slab.h>
     39 #include <linux/vga_switcheroo.h>
     40 #include <linux/vgaarb.h>
     41 
     42 #include <drm/drm_cache.h>
     43 #include <drm/drm_crtc_helper.h>
     44 #include <drm/drm_debugfs.h>
     45 #include <drm/drm_device.h>
     46 #include <drm/drm_file.h>
     47 #include <drm/drm_probe_helper.h>
     48 #include <drm/radeon_drm.h>
     49 
     50 #include "radeon_reg.h"
     51 #include "radeon.h"
     52 #include "atom.h"
     53 
     54 #include <linux/nbsd-namespace.h>
     55 
/*
 * Human-readable ASIC family names for boot/diagnostic messages.
 * NOTE(review): presumably indexed by the CHIP_* family enum used
 * throughout this file (rdev->family) — keep the order in sync with
 * that enum; "LAST" terminates the table.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
    121 
/*
 * ATPX (hybrid-graphics ACPI method) queries.  Real implementations live
 * in the VGA switcheroo support code; without CONFIG_VGA_SWITCHEROO they
 * stub to false, which causes PX to be disabled in
 * radeon_device_handle_px_quirks() below.
 */
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
    129 
/* Quirk flag: force PX (PowerXpress hybrid graphics) off for this device. */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

/*
 * One PX quirk table entry: a device is matched on all four PCI IDs and,
 * on a match, px_quirk_flags is copied into rdev->px_quirk_flags.
 */
struct radeon_px_quirk {
	u32 chip_vendor;	/* PCI vendor ID of the GPU */
	u32 chip_device;	/* PCI device ID of the GPU */
	u32 subsys_vendor;	/* PCI subsystem vendor ID */
	u32 subsys_device;	/* PCI subsystem device ID */
	u32 px_quirk_flags;	/* RADEON_PX_QUIRK_* flags to apply */
};
    139 
/* Known-broken PX systems; scanned by radeon_device_handle_px_quirks(). */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },	/* terminator: chip_device == 0 ends the scan */
};
    163 
    164 bool radeon_is_px(struct drm_device *dev)
    165 {
    166 	struct radeon_device *rdev = dev->dev_private;
    167 
    168 	if (rdev->flags & RADEON_IS_PX)
    169 		return true;
    170 	return false;
    171 }
    172 
    173 static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
    174 {
    175 	struct radeon_px_quirk *p = radeon_px_quirk_list;
    176 
    177 	/* Apply PX quirks */
    178 	while (p && p->chip_device != 0) {
    179 		if (rdev->pdev->vendor == p->chip_vendor &&
    180 		    rdev->pdev->device == p->chip_device &&
    181 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
    182 		    rdev->pdev->subsystem_device == p->subsys_device) {
    183 			rdev->px_quirk_flags = p->px_quirk_flags;
    184 			break;
    185 		}
    186 		++p;
    187 	}
    188 
    189 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
    190 		rdev->flags &= ~RADEON_IS_PX;
    191 
    192 	/* disable PX is the system doesn't support dGPU power control or hybrid gfx */
    193 	if (!radeon_is_atpx_hybrid() &&
    194 	    !radeon_has_atpx_dgpu_power_cntl())
    195 		rdev->flags &= ~RADEON_IS_PX;
    196 }
    197 
    198 /**
    199  * radeon_program_register_sequence - program an array of registers.
    200  *
    201  * @rdev: radeon_device pointer
    202  * @registers: pointer to the register array
    203  * @array_size: size of the register array
    204  *
    205  * Programs an array or registers with and and or masks.
    206  * This is a helper for setting golden registers.
    207  */
    208 void radeon_program_register_sequence(struct radeon_device *rdev,
    209 				      const u32 *registers,
    210 				      const u32 array_size)
    211 {
    212 	u32 tmp, reg, and_mask, or_mask;
    213 	int i;
    214 
    215 	if (array_size % 3)
    216 		return;
    217 
    218 	for (i = 0; i < array_size; i +=3) {
    219 		reg = registers[i + 0];
    220 		and_mask = registers[i + 1];
    221 		or_mask = registers[i + 2];
    222 
    223 		if (and_mask == 0xffffffff) {
    224 			tmp = or_mask;
    225 		} else {
    226 			tmp = RREG32(reg);
    227 			tmp &= ~and_mask;
    228 			tmp |= or_mask;
    229 		}
    230 		WREG32(reg, tmp);
    231 	}
    232 }
    233 
/**
 * radeon_pci_config_reset - reset the ASIC via PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Writes the RADEON_ASIC_RESET_DATA magic to PCI config offset 0x7c
 * to request an ASIC reset.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
    238 
    239 /**
    240  * radeon_surface_init - Clear GPU surface registers.
    241  *
    242  * @rdev: radeon_device pointer
    243  *
    244  * Clear GPU surface registers (r1xx-r5xx).
    245  */
    246 void radeon_surface_init(struct radeon_device *rdev)
    247 {
    248 	/* FIXME: check this out */
    249 	if (rdev->family < CHIP_R600) {
    250 		int i;
    251 
    252 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
    253 			if (rdev->surface_regs[i].bo)
    254 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
    255 			else
    256 				radeon_clear_surface_reg(rdev, i);
    257 		}
    258 		/* enable surfaces */
    259 		WREG32(RADEON_SURFACE_CNTL, 0);
    260 	}
    261 }
    262 
    263 /*
    264  * GPU scratch registers helpers function.
    265  */
    266 /**
    267  * radeon_scratch_init - Init scratch register driver information.
    268  *
    269  * @rdev: radeon_device pointer
    270  *
    271  * Init CP scratch register driver information (r1xx-r5xx)
    272  */
    273 void radeon_scratch_init(struct radeon_device *rdev)
    274 {
    275 	int i;
    276 
    277 	/* FIXME: check this out */
    278 	if (rdev->family < CHIP_R300) {
    279 		rdev->scratch.num_reg = 5;
    280 	} else {
    281 		rdev->scratch.num_reg = 7;
    282 	}
    283 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    284 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    285 		rdev->scratch.free[i] = true;
    286 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    287 	}
    288 }
    289 
    290 /**
    291  * radeon_scratch_get - Allocate a scratch register
    292  *
    293  * @rdev: radeon_device pointer
    294  * @reg: scratch register mmio offset
    295  *
    296  * Allocate a CP scratch register for use by the driver (all asics).
    297  * Returns 0 on success or -EINVAL on failure.
    298  */
    299 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
    300 {
    301 	int i;
    302 
    303 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    304 		if (rdev->scratch.free[i]) {
    305 			rdev->scratch.free[i] = false;
    306 			*reg = rdev->scratch.reg[i];
    307 			return 0;
    308 		}
    309 	}
    310 	return -EINVAL;
    311 }
    312 
    313 /**
    314  * radeon_scratch_free - Free a scratch register
    315  *
    316  * @rdev: radeon_device pointer
    317  * @reg: scratch register mmio offset
    318  *
    319  * Free a CP scratch register allocated for use by the driver (all asics)
    320  */
    321 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
    322 {
    323 	int i;
    324 
    325 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    326 		if (rdev->scratch.reg[i] == reg) {
    327 			rdev->scratch.free[i] = true;
    328 			return;
    329 		}
    330 	}
    331 }
    332 
/*
 * GPU doorbell aperture helpers function.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK): reads the doorbell BAR (BAR 2),
 * maps it (bus_space on NetBSD, ioremap on Linux) and marks all doorbell
 * slots free.
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	int r;
#endif

	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* clamp to what fits in the BAR, capped at RADEON_MAX_DOORBELLS */
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	rdev->doorbell.bst = rdev->pdev->pd_pa.pa_memt;
	r = -bus_space_map(rdev->doorbell.bst, rdev->doorbell.base,
	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)),
	    0, &rdev->doorbell.bsh);
	if (r)
		return r;
#else
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
#endif
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* all doorbells start out free */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
    379 
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK): unmaps the doorbell
 * aperture mapped in radeon_doorbell_init().
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	/* size must match the bus_space_map() in radeon_doorbell_init() */
	bus_space_unmap(rdev->doorbell.bst, rdev->doorbell.bsh,
	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)));
#else
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
#endif
}
    397 
    398 /**
    399  * radeon_doorbell_get - Allocate a doorbell entry
    400  *
    401  * @rdev: radeon_device pointer
    402  * @doorbell: doorbell index
    403  *
    404  * Allocate a doorbell for use by the driver (all asics).
    405  * Returns 0 on success or -EINVAL on failure.
    406  */
    407 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
    408 {
    409 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
    410 	if (offset < rdev->doorbell.num_doorbells) {
    411 		__set_bit(offset, rdev->doorbell.used);
    412 		*doorbell = offset;
    413 		return 0;
    414 	} else {
    415 		return -EINVAL;
    416 	}
    417 }
    418 
    419 /**
    420  * radeon_doorbell_free - Free a doorbell entry
    421  *
    422  * @rdev: radeon_device pointer
    423  * @doorbell: doorbell index
    424  *
    425  * Free a doorbell allocated for use by the driver (all asics)
    426  */
    427 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
    428 {
    429 	if (doorbell < rdev->doorbell.num_doorbells)
    430 		__clear_bit(doorbell, rdev->doorbell.used);
    431 }
    432 
    433 /*
    434  * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
    436  * in memory with the status of certain GPU events (fences, ring pointers,
    437  * etc.).
    438  */
    439 
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 * Only clears the enable flag; the writeback memory itself is kept
 * (radeon_wb_fini() frees it).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
    451 
    452 /**
    453  * radeon_wb_fini - Disable Writeback and free memory
    454  *
    455  * @rdev: radeon_device pointer
    456  *
    457  * Disables Writeback and frees the Writeback memory (all asics).
    458  * Used at driver shutdown.
    459  */
    460 void radeon_wb_fini(struct radeon_device *rdev)
    461 {
    462 	radeon_wb_disable(rdev);
    463 	if (rdev->wb.wb_obj) {
    464 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
    465 			radeon_bo_kunmap(rdev->wb.wb_obj);
    466 			radeon_bo_unpin(rdev->wb.wb_obj);
    467 			radeon_bo_unreserve(rdev->wb.wb_obj);
    468 		}
    469 		radeon_bo_unref(&rdev->wb.wb_obj);
    470 		rdev->wb.wb = NULL;
    471 		rdev->wb.wb_obj = NULL;
    472 	}
    473 }
    474 
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics):
 * creates, pins and kmaps a GTT buffer object for the writeback page,
 * then decides whether writeback and event-write fences can be used on
 * this chip/bus combination.
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate the WB buffer object once; later calls only re-clear it */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)__UNVOLATILE(&rdev->wb.wb));
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset(__UNVOLATILE(rdev->wb.wb), 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
    550 
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* radeon_vram_limit module param, in MiB; 0 means no limit */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64" (%"PRIu64"M used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
    615 
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM, choosing the
 * larger of the two gaps (below vram_start or above vram_end).
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * This function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;	/* space after / before VRAM, aligned */

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64"\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
    651 
/*
 * GPU helpers function.
 */

/**
 * radeon_device_is_virtual - check if we are running is a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 * On NetBSD and on non-x86 this always reports false; on Linux/x86 it
 * relies on the CPU's hypervisor feature bit.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
#ifdef __NetBSD__		/* XXX virtualization */
	return false;
#else
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#endif
#else
	return false;
#endif
}
    675 
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics) by probing CRTC
 * enable bits and, failing that, the memory-size register.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

#ifndef __NetBSD__		/* XXX radeon efi */
	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;
#endif

	/* no display engine: CRTC probing is meaningless, check memsize only */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
			if (rdev->num_crtc >= 4) {
				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
			}
			if (rdev->num_crtc >= 6) {
				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
			}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
    746 
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics).
 * Converts the current clocks to fixed-point MHz values and, on IGPs,
 * derives the core bandwidth figure from sclk.
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
    774 
    775 /**
    776  * radeon_boot_test_post_card - check and possibly initialize the hw
    777  *
    778  * @rdev: radeon_device pointer
    779  *
    780  * Check if the asic is initialized and if not, attempt to initialize
    781  * it (all asics).
    782  * Returns true if initialized or false if not.
    783  */
    784 bool radeon_boot_test_post_card(struct radeon_device *rdev)
    785 {
    786 	if (radeon_card_posted(rdev))
    787 		return true;
    788 
    789 	if (rdev->bios) {
    790 		DRM_INFO("GPU not posted. posting now...\n");
    791 		if (rdev->is_atom_bios)
    792 			atom_asic_init(rdev->mode_info.atom_context);
    793 		else
    794 			radeon_combios_asic_init(rdev->ddev);
    795 		return true;
    796 	} else {
    797 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
    798 		return false;
    799 	}
    800 }
    801 
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	/* XXX Can this be called more than once??  */
	if (rdev->dummy_page.rdp_map != NULL)
		return 0;

	/* one DMA-able page: alloc -> create map -> map KVA -> load */
	error = bus_dmamem_alloc(rdev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &rdev->dummy_page.rdp_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamap_create(rdev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &rdev->dummy_page.rdp_map);
	if (error)
		goto fail1;
	error = bus_dmamem_map(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1,
	    PAGE_SIZE, &rdev->dummy_page.rdp_addr,
	    BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
	if (error)
		goto fail2;
	error = bus_dmamap_load(rdev->ddev->dmat, rdev->dummy_page.rdp_map,
	    rdev->dummy_page.rdp_addr, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	if (error)
		goto fail3;

	memset(rdev->dummy_page.rdp_addr, 0, PAGE_SIZE);

	/* Success!  */
	rdev->dummy_page.addr = rdev->dummy_page.rdp_map->dm_segs[0].ds_addr;
	rdev->dummy_page.entry = radeon_gart_get_page_entry(
		rdev->dummy_page.addr, RADEON_GART_PAGE_DUMMY);
	return 0;

	/* fail4 is never jumped to (nothing can fail after dmamap_load);
	 * kept __unused for symmetry of the unwind chain */
fail4: __unused
	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
fail3:	bus_dmamem_unmap(rdev->ddev->dmat, rdev->dummy_page.rdp_addr,
	    PAGE_SIZE);
fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
fail0:	KASSERT(error);
	rdev->dummy_page.rdp_map = NULL;
	/* XXX errno NetBSD->Linux */
	return -error;
#else
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
#endif
}
    878 
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 * Tears down the DMA resources in reverse order of radeon_dummy_page_init();
 * safe to call when no dummy page was allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
#ifdef __NetBSD__

	if (rdev->dummy_page.rdp_map == NULL)
		return;
	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
	bus_dmamem_unmap(rdev->ddev->dmat, rdev->dummy_page.rdp_addr,
	    PAGE_SIZE);
	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
	rdev->dummy_page.rdp_map = NULL;
#else
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
#endif
}
    907 
    908 
    909 /* ATOM accessor methods */
    910 /*
    911  * ATOM is an interpreted byte code stored in tables in the vbios.  The
    912  * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
    914  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
    915  * atombios.h, and atom.c
    916  */
    917 
    918 /**
    919  * cail_pll_read - read PLL register
    920  *
    921  * @info: atom card_info pointer
    922  * @reg: PLL register offset
    923  *
    924  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    925  * Returns the value of the PLL register.
    926  */
    927 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
    928 {
    929 	struct radeon_device *rdev = info->dev->dev_private;
    930 	uint32_t r;
    931 
    932 	r = rdev->pll_rreg(rdev, reg);
    933 	return r;
    934 }
    935 
    936 /**
    937  * cail_pll_write - write PLL register
    938  *
    939  * @info: atom card_info pointer
    940  * @reg: PLL register offset
    941  * @val: value to write to the pll register
    942  *
    943  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    944  */
    945 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
    946 {
    947 	struct radeon_device *rdev = info->dev->dev_private;
    948 
    949 	rdev->pll_wreg(rdev, reg, val);
    950 }
    951 
    952 /**
    953  * cail_mc_read - read MC (Memory Controller) register
    954  *
    955  * @info: atom card_info pointer
    956  * @reg: MC register offset
    957  *
    958  * Provides an MC register accessor for the atom interpreter (r4xx+).
    959  * Returns the value of the MC register.
    960  */
    961 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
    962 {
    963 	struct radeon_device *rdev = info->dev->dev_private;
    964 	uint32_t r;
    965 
    966 	r = rdev->mc_rreg(rdev, reg);
    967 	return r;
    968 }
    969 
    970 /**
    971  * cail_mc_write - write MC (Memory Controller) register
    972  *
    973  * @info: atom card_info pointer
    974  * @reg: MC register offset
    975  * @val: value to write to the pll register
    976  *
    977  * Provides a MC register accessor for the atom interpreter (r4xx+).
    978  */
    979 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
    980 {
    981 	struct radeon_device *rdev = info->dev->dev_private;
    982 
    983 	rdev->mc_wreg(rdev, reg, val);
    984 }
    985 
    986 /**
    987  * cail_reg_write - write MMIO register
    988  *
    989  * @info: atom card_info pointer
    990  * @reg: MMIO register offset
    991  * @val: value to write to the pll register
    992  *
    993  * Provides a MMIO register accessor for the atom interpreter (r4xx+).
    994  */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	/* WREG32 expands using the local named "rdev" -- keep the name. */
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM passes dword offsets; WREG32 takes a byte offset, hence *4. */
	WREG32(reg*4, val);
}
   1001 
   1002 /**
   1003  * cail_reg_read - read MMIO register
   1004  *
   1005  * @info: atom card_info pointer
   1006  * @reg: MMIO register offset
   1007  *
   1008  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
   1009  * Returns the value of the MMIO register.
   1010  */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	/* RREG32 expands using the local named "rdev" -- keep the name. */
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	/* ATOM passes dword offsets; RREG32 takes a byte offset, hence *4. */
	r = RREG32(reg*4);
	return r;
}
   1019 
   1020 /**
   1021  * cail_ioreg_write - write IO register
   1022  *
   1023  * @info: atom card_info pointer
   1024  * @reg: IO register offset
   1025  * @val: value to write to the pll register
   1026  *
   1027  * Provides a IO register accessor for the atom interpreter (r4xx+).
   1028  */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	/* WREG32_IO expands using the local named "rdev" -- keep the name. */
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM passes dword offsets; WREG32_IO takes a byte offset, hence *4. */
	WREG32_IO(reg*4, val);
}
   1035 
   1036 /**
   1037  * cail_ioreg_read - read IO register
   1038  *
   1039  * @info: atom card_info pointer
   1040  * @reg: IO register offset
   1041  *
   1042  * Provides an IO register accessor for the atom interpreter (r4xx+).
   1043  * Returns the value of the IO register.
   1044  */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	/* RREG32_IO expands using the local named "rdev" -- keep the name. */
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	/* ATOM passes dword offsets; RREG32_IO takes a byte offset, hence *4. */
	r = RREG32_IO(reg*4);
	return r;
}
   1053 
   1054 /**
   1055  * radeon_atombios_init - init the driver info and callbacks for atombios
   1056  *
   1057  * @rdev: radeon_device pointer
   1058  *
   1059  * Initializes the driver info and register access callbacks for the
   1060  * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
   1062  * Called at driver startup.
   1063  */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	/* Stash the card_info so radeon_atombios_fini() can free it,
	 * including on the error path below. */
	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
#ifdef __NetBSD__
	if (rdev->rio_mem_size)
#else
	if (rdev->rio_mem)
#endif
	{
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* No I/O BAR mapped: fall back to MMIO accessors for IIO. */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	/* Parse the vbios tables; NULL means the interpreter could not
	 * be set up, so undo the allocation above. */
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
   1107 
   1108 /**
   1109  * radeon_atombios_fini - free the driver info and callbacks for atombios
   1110  *
   1111  * @rdev: radeon_device pointer
   1112  *
   1113  * Frees the driver info and register access callbacks for the ATOM
   1114  * interpreter (r4xx+).
   1115  * Called at driver shutdown.
   1116  */
   1117 void radeon_atombios_fini(struct radeon_device *rdev)
   1118 {
   1119 	if (rdev->mode_info.atom_context) {
   1120 		mutex_destroy(&rdev->mode_info.atom_context->scratch_mutex);
   1121 		mutex_destroy(&rdev->mode_info.atom_context->mutex);
   1122 		kfree(rdev->mode_info.atom_context->scratch);
   1123 	}
   1124 	kfree(rdev->mode_info.atom_context);
   1125 	rdev->mode_info.atom_context = NULL;
   1126 	kfree(rdev->mode_info.atom_card_info);
   1127 	rdev->mode_info.atom_card_info = NULL;
   1128 }
   1129 
   1130 /* COMBIOS */
   1131 /*
   1132  * COMBIOS is the bios format prior to ATOM. It provides
   1133  * command tables similar to ATOM, but doesn't have a unified
   1134  * parser.  See radeon_combios.c
   1135  */
   1136 
   1137 /**
   1138  * radeon_combios_init - init the driver info for combios
   1139  *
   1140  * @rdev: radeon_device pointer
   1141  *
   1142  * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
   1144  * Called at driver startup.
   1145  */
int radeon_combios_init(struct radeon_device *rdev)
{
	/* COMBIOS has no interpreter state to set up; only the BIOS
	 * scratch registers need initializing.  Cannot fail. */
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
   1151 
   1152 /**
   1153  * radeon_combios_fini - free the driver info for combios
   1154  *
   1155  * @rdev: radeon_device pointer
   1156  *
   1157  * Frees the driver info for combios (r1xx-r3xx).
   1158  * Called at driver shutdown.
   1159  */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* Intentionally empty: radeon_combios_init() allocates nothing. */
}
   1163 
   1164 #ifndef __NetBSD__		/* XXX radeon vga */
   1165 /* if we get transitioned to only one device, take VGA back */
   1166 /**
   1167  * radeon_vga_set_decode - enable/disable vga decode
   1168  *
   1169  * @cookie: radeon_device pointer
   1170  * @state: enable/disable vga decode
   1171  *
   1172  * Enable/disable vga decode (all asics).
   1173  * Returns VGA resource flags.
   1174  */
   1175 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
   1176 {
   1177 	struct radeon_device *rdev = cookie;
   1178 	radeon_vga_set_state(rdev, state);
   1179 	if (state)
   1180 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
   1181 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1182 	else
   1183 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1184 }
   1185 #endif
   1186 
   1187 /**
   1188  * radeon_check_pot_argument - check that argument is a power of two
   1189  *
   1190  * @arg: value to check
   1191  *
   1192  * Validates that a certain argument is a power of two (all asics).
   1193  * Returns true if argument is valid.
   1194  */
static bool radeon_check_pot_argument(int arg)
{
	/* Clearing the lowest set bit leaves zero iff at most one bit is
	 * set.  Note this deliberately accepts 0 (used as "disabled"). */
	return !(arg & (arg - 1));
}
   1199 
   1200 /**
   1201  * Determine a sensible default GART size according to ASIC family.
   1202  *
 * @family: ASIC family name
   1204  */
   1205 static int radeon_gart_size_auto(enum radeon_family family)
   1206 {
   1207 	/* default to a larger gart size on newer asics */
   1208 	if (family >= CHIP_TAHITI)
   1209 		return 2048;
   1210 	else if (family >= CHIP_RV770)
   1211 		return 1024;
   1212 	else
   1213 		return 512;
   1214 }
   1215 
   1216 /**
   1217  * radeon_check_arguments - validate module params
   1218  *
   1219  * @rdev: radeon_device pointer
   1220  *
   1221  * Validates certain module parameters and updates
   1222  * the associated values used by the driver (all asics).
   1223  */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		/* 0 means "no limit". */
		radeon_vram_limit = 0;
	}

	/* -1 is the module-param sentinel for "pick per ASIC family". */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB; gtt_size is in bytes. */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* radeon_vm_size is in GB; fall back to the 4GB default on any
	 * invalid setting below. */
	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* A PT deeper than 24 bits, or deeper than the whole VM space,
	 * is rejected and clamped back to the minimum. */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
   1313 
   1314 #ifndef __NetBSD__		/* XXX radeon vga */
   1315 /**
   1316  * radeon_switcheroo_set_state - set switcheroo state
   1317  *
   1318  * @pdev: pci dev pointer
   1319  * @state: vga_switcheroo state
   1320  *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
   1323  */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX (PowerXpress) cards are powered down via runtime PM instead. */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		/* Stop output polling before the hardware goes away. */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
   1348 
   1349 /**
   1350  * radeon_switcheroo_can_switch - see if switcheroo state can change
   1351  *
   1352  * @pdev: pci dev pointer
   1353  *
   1354  * Callback for the switcheroo driver.  Check of the switcheroo
   1355  * state can be changed.
   1356  * Returns true if the state can be changed, false if not.
   1357  */
   1358 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
   1359 {
   1360 	struct drm_device *dev = pci_get_drvdata(pdev);
   1361 
   1362 	/*
   1363 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
   1364 	 * locking inversion with the driver load path. And the access here is
   1365 	 * completely racy anyway. So don't bother with locking for now.
   1366 	 */
   1367 	return dev->open_count == 0;
   1368 }
   1369 
/* vga_switcheroo callbacks; no reprobe hook is needed for radeon. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
   1375 #endif
   1376 
   1377 /**
   1378  * radeon_device_init - initialize the driver
   1379  *
   1380  * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
   1382  * @pdev: pci dev pointer
   1383  * @flags: driver flags
   1384  *
   1385  * Initializes the driver info and hw (all asics).
   1386  * Returns 0 for success or an error on failure.
   1387  * Called at driver startup.
   1388  */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
#ifndef __NetBSD__
	bool runtime = false;
#endif

	/* Record device identity and conservative defaults before any
	 * hardware access. */
	rdev->shutdown = false;
	rdev->dev = ddev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
#ifdef __NetBSD__
	spin_lock_init(&rdev->irq.vblank_lock);
	DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
#else
	init_waitqueue_head(&rdev->irq.vblank_queue);
#endif
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	/* Sanitize module parameters (gart/vm sizes, agpmode, ...). */
	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	dma_bits = 40;
	if (rdev->flags & RADEON_IS_AGP)
		dma_bits = 32;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		dma_bits = 32;
#ifdef CONFIG_PPC64
	if (rdev->family == CHIP_CEDAR)
		dma_bits = 32;
#endif

#ifdef __NetBSD__
	/* NOTE(review): NetBSD path only warns on failure and continues,
	 * while the Linux path returns -- confirm this asymmetry is
	 * intentional. */
	r = drm_limit_dma_space(rdev->ddev, 0, __BITS(dma_bits - 1, 0));
	if (r)
		DRM_ERROR("No suitable DMA available.\n");
#else
	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pr_warn("radeon: No suitable DMA available\n");
		return r;
	}
#endif
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	/* XXX Destroy these locks on detach...  */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
#ifdef __NetBSD__
    {
	/* Register BAR is 5 on Bonaire and newer, 2 on older ASICs. */
	pcireg_t bar;

	if (rdev->family >= CHIP_BONAIRE)
		bar = 5;
	else
		bar = 2;
	if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(bar),
		pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
		    rdev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
		0,
		&rdev->rmmio_bst, &rdev->rmmio_bsh,
		&rdev->rmmio_addr, &rdev->rmmio_size))
		return -EIO;
    }
	DRM_INFO("register mmio base: 0x%"PRIxMAX"\n",
	    (uintmax_t)rdev->rmmio_addr);
	DRM_INFO("register mmio size: %"PRIuMAX"\n",
	    (uintmax_t)rdev->rmmio_size);
#else
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;
#endif

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	/* Probe each BAR until an I/O-space one maps successfully. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
#ifdef __NetBSD__
		if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(i),
			PCI_MAPREG_TYPE_IO, 0,
			&rdev->rio_mem_bst, &rdev->rio_mem_bsh,
			NULL, &rdev->rio_mem_size))
			continue;
		break;
#else
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
#endif
	}
	/* Missing I/O BAR is survivable -- ATOM falls back to MMIO. */
#ifdef __NetBSD__
	if (i == DEVICE_COUNT_RESOURCE)
		DRM_ERROR("Unable to find PCI I/O BAR\n");
#else
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");
#endif

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

#ifndef __NetBSD__		/* XXX radeon vga */
	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
#endif

	/* ASIC-specific hardware init; sets rdev->accel_working on success. */
	r = radeon_init(rdev);
	if (r)
		goto failed;

	/* debugfs registration failures are logged but not fatal */
	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* Optional self-tests/benchmarks controlled by module parameters. */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
#ifndef __NetBSD__		/* XXX radeon vga */
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
#endif
	return r;
}
   1672 
   1673 /**
   1674  * radeon_device_fini - tear down the driver
   1675  *
   1676  * @rdev: radeon_device pointer
   1677  *
   1678  * Tear down the driver info (all asics).
   1679  * Called at driver shutdown.
   1680  */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* Flag shutdown first so concurrent paths can bail out early. */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
#ifndef __NetBSD__
	/* Undo the vga/switcheroo registrations from radeon_device_init. */
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
#endif
	/* Unmap the I/O and register BARs mapped in radeon_device_init. */
#ifdef __NetBSD__
	if (rdev->rio_mem_size)
		bus_space_unmap(rdev->rio_mem_bst, rdev->rio_mem_bsh,
		    rdev->rio_mem_size);
	rdev->rio_mem_size = 0;
	bus_space_unmap(rdev->rmmio_bst, rdev->rmmio_bsh, rdev->rmmio_size);
#else
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
#endif
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);

	/* Destroy locks in roughly reverse order of initialization. */
#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
	spin_lock_destroy(&rdev->irq.vblank_lock);
	destroy_rwsem(&rdev->exclusive_lock);
	destroy_rwsem(&rdev->pm.mclk_lock);
#endif
	mutex_destroy(&rdev->srbm_mutex);
	mutex_destroy(&rdev->gpu_clock_mutex);
	mutex_destroy(&rdev->pm.mutex);
	mutex_destroy(&rdev->gem.mutex);
	mutex_destroy(&rdev->dc_hw_i2c_mutex);
	mutex_destroy(&rdev->ring_lock);
}
   1724 
   1725 
   1726 /*
   1727  * Suspend & resume.
   1728  */
   1729 /**
   1730  * radeon_suspend_kms - initiate device suspend
   1731  *
 * @dev: drm dev pointer
 * @suspend: true if the device should be shut down after suspending
   1734  *
   1735  * Puts the hw in the suspend state (all asics).
   1736  * Returns 0 for success or an error on failure.
   1737  * Called at driver suspend.
   1738  */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* Card already powered off via switcheroo: nothing to do. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			/* NOTE(review): this inner robj deliberately
			 * shadows the outer one for the cursor BO. */
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(fb->obj[0]);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

#ifndef __NetBSD__		/* pmf handles this for us.  */
	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
#endif

	/* Suspend the fbdev console while holding the console lock. */
	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
   1836 
   1837 /**
   1838  * radeon_resume_kms - initiate device resume
   1839  *
   1840  * @pdev: drm dev pointer
   1841  *
   1842  * Bring the hw back to operating state (all asics).
   1843  * Returns 0 for success or an error on failure.
   1844  * Called at driver resume.
   1845  */
   1846 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
   1847 {
   1848 	struct drm_connector *connector;
   1849 	struct radeon_device *rdev = dev->dev_private;
   1850 	struct drm_crtc *crtc;
   1851 	int r;
   1852 
   1853 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1854 		return 0;
   1855 
   1856 	if (fbcon) {
   1857 		console_lock();
   1858 	}
   1859 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1860 	if (resume) {
   1861 		pci_set_power_state(dev->pdev, PCI_D0);
   1862 		pci_restore_state(dev->pdev);
   1863 		if (pci_enable_device(dev->pdev)) {
   1864 			if (fbcon)
   1865 				console_unlock();
   1866 			return -1;
   1867 		}
   1868 	}
   1869 #endif
   1870 	/* resume AGP if in use */
   1871 	radeon_agp_resume(rdev);
   1872 	radeon_resume(rdev);
   1873 
   1874 	r = radeon_ib_ring_tests(rdev);
   1875 	if (r)
   1876 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1877 
   1878 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   1879 		/* do dpm late init */
   1880 		r = radeon_pm_late_init(rdev);
   1881 		if (r) {
   1882 			rdev->pm.dpm_enabled = false;
   1883 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1884 		}
   1885 	} else {
   1886 		/* resume old pm late */
   1887 		radeon_pm_resume(rdev);
   1888 	}
   1889 
   1890 	radeon_restore_bios_scratch_regs(rdev);
   1891 
   1892 	/* pin cursors */
   1893 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1894 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1895 
   1896 		if (radeon_crtc->cursor_bo) {
   1897 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1898 			r = radeon_bo_reserve(robj, false);
   1899 			if (r == 0) {
   1900 				/* Only 27 bit offset for legacy cursor */
   1901 				r = radeon_bo_pin_restricted(robj,
   1902 							     RADEON_GEM_DOMAIN_VRAM,
   1903 							     ASIC_IS_AVIVO(rdev) ?
   1904 							     0 : 1 << 27,
   1905 							     &radeon_crtc->cursor_addr);
   1906 				if (r != 0)
   1907 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
   1908 				radeon_bo_unreserve(robj);
   1909 			}
   1910 		}
   1911 	}
   1912 
   1913 	/* init dig PHYs, disp eng pll */
   1914 	if (rdev->is_atom_bios) {
   1915 		radeon_atom_encoder_init(rdev);
   1916 		radeon_atom_disp_eng_pll_init(rdev);
   1917 		/* turn on the BL */
   1918 		if (rdev->mode_info.bl_encoder) {
   1919 			u8 bl_level = radeon_get_backlight_level(rdev,
   1920 								 rdev->mode_info.bl_encoder);
   1921 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   1922 						   bl_level);
   1923 		}
   1924 	}
   1925 	/* reset hpd state */
   1926 	radeon_hpd_init(rdev);
   1927 	/* blat the mode back in */
   1928 	if (fbcon) {
   1929 		drm_helper_resume_force_mode(dev);
   1930 		/* turn on display hw */
   1931 		drm_modeset_lock_all(dev);
   1932 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1933 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
   1934 		}
   1935 		drm_modeset_unlock_all(dev);
   1936 	}
   1937 
   1938 	drm_kms_helper_poll_enable(dev);
   1939 
   1940 	/* set the power state here in case we are a PX system or headless */
   1941 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   1942 		radeon_pm_compute_clocks(rdev);
   1943 
   1944 	if (fbcon) {
   1945 		radeon_fbdev_set_suspend(rdev, 0);
   1946 		console_unlock();
   1947 	}
   1948 
   1949 	return 0;
   1950 }
   1951 
   1952 /**
   1953  * radeon_gpu_reset - reset the asic
   1954  *
   1955  * @rdev: radeon device pointer
   1956  *
   1957  * Attempt the reset the GPU if it has hung (all asics).
   1958  * Returns 0 for success or an error on failure.
   1959  */
   1960 int radeon_gpu_reset(struct radeon_device *rdev)
   1961 {
   1962 	unsigned ring_sizes[RADEON_NUM_RINGS];
   1963 	uint32_t *ring_data[RADEON_NUM_RINGS];
   1964 
   1965 	bool saved = false;
   1966 
   1967 	int i, r;
   1968 	int resched;
   1969 
   1970 	down_write(&rdev->exclusive_lock);
   1971 
   1972 	if (!rdev->needs_reset) {
   1973 		up_write(&rdev->exclusive_lock);
   1974 		return 0;
   1975 	}
   1976 
   1977 	atomic_inc(&rdev->gpu_reset_counter);
   1978 
   1979 	radeon_save_bios_scratch_regs(rdev);
   1980 	/* block TTM */
   1981 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
   1982 	radeon_suspend(rdev);
   1983 	radeon_hpd_fini(rdev);
   1984 
   1985 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1986 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
   1987 						   &ring_data[i]);
   1988 		if (ring_sizes[i]) {
   1989 			saved = true;
   1990 			dev_info(rdev->dev, "Saved %d dwords of commands "
   1991 				 "on ring %d.\n", ring_sizes[i], i);
   1992 		}
   1993 	}
   1994 
   1995 	r = radeon_asic_reset(rdev);
   1996 	if (!r) {
   1997 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
   1998 		radeon_resume(rdev);
   1999 	}
   2000 
   2001 	radeon_restore_bios_scratch_regs(rdev);
   2002 
   2003 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   2004 		if (!r && ring_data[i]) {
   2005 			radeon_ring_restore(rdev, &rdev->ring[i],
   2006 					    ring_sizes[i], ring_data[i]);
   2007 		} else {
   2008 			radeon_fence_driver_force_completion(rdev, i);
   2009 			kfree(ring_data[i]);
   2010 		}
   2011 	}
   2012 
   2013 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   2014 		/* do dpm late init */
   2015 		r = radeon_pm_late_init(rdev);
   2016 		if (r) {
   2017 			rdev->pm.dpm_enabled = false;
   2018 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   2019 		}
   2020 	} else {
   2021 		/* resume old pm late */
   2022 		radeon_pm_resume(rdev);
   2023 	}
   2024 
   2025 	/* init dig PHYs, disp eng pll */
   2026 	if (rdev->is_atom_bios) {
   2027 		radeon_atom_encoder_init(rdev);
   2028 		radeon_atom_disp_eng_pll_init(rdev);
   2029 		/* turn on the BL */
   2030 		if (rdev->mode_info.bl_encoder) {
   2031 			u8 bl_level = radeon_get_backlight_level(rdev,
   2032 								 rdev->mode_info.bl_encoder);
   2033 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   2034 						   bl_level);
   2035 		}
   2036 	}
   2037 	/* reset hpd state */
   2038 	radeon_hpd_init(rdev);
   2039 
   2040 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
   2041 
   2042 	rdev->in_reset = true;
   2043 	rdev->needs_reset = false;
   2044 
   2045 	downgrade_write(&rdev->exclusive_lock);
   2046 
   2047 	drm_helper_resume_force_mode(rdev->ddev);
   2048 
   2049 	/* set the power state here in case we are a PX system or headless */
   2050 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   2051 		radeon_pm_compute_clocks(rdev);
   2052 
   2053 	if (!r) {
   2054 		r = radeon_ib_ring_tests(rdev);
   2055 		if (r && saved)
   2056 			r = -EAGAIN;
   2057 	} else {
   2058 		/* bad news, how to tell it to userspace ? */
   2059 		dev_info(rdev->dev, "GPU reset failed\n");
   2060 	}
   2061 
   2062 	rdev->needs_reset = r == -EAGAIN;
   2063 	rdev->in_reset = false;
   2064 
   2065 	up_read(&rdev->exclusive_lock);
   2066 	return r;
   2067 }
   2068 
   2069 
   2070 /*
   2071  * Debugfs
   2072  */
   2073 int radeon_debugfs_add_files(struct radeon_device *rdev,
   2074 			     struct drm_info_list *files,
   2075 			     unsigned nfiles)
   2076 {
   2077 	unsigned i;
   2078 
   2079 	for (i = 0; i < rdev->debugfs_count; i++) {
   2080 		if (rdev->debugfs[i].files == files) {
   2081 			/* Already registered */
   2082 			return 0;
   2083 		}
   2084 	}
   2085 
   2086 	i = rdev->debugfs_count + 1;
   2087 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
   2088 		DRM_ERROR("Reached maximum number of debugfs components.\n");
   2089 		DRM_ERROR("Report so we increase "
   2090 			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
   2091 		return -EINVAL;
   2092 	}
   2093 	rdev->debugfs[rdev->debugfs_count].files = files;
   2094 	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
   2095 	rdev->debugfs_count = i;
   2096 #if defined(CONFIG_DEBUG_FS)
   2097 	drm_debugfs_create_files(files, nfiles,
   2098 				 rdev->ddev->primary->debugfs_root,
   2099 				 rdev->ddev->primary);
   2100 #endif
   2101 	return 0;
   2102 }
   2103