      1 /*	$NetBSD: radeon_device.c,v 1.10 2020/02/14 14:34:59 maya Exp $	*/
      2 
      3 /*
      4  * Copyright 2008 Advanced Micro Devices, Inc.
      5  * Copyright 2008 Red Hat Inc.
      6  * Copyright 2009 Jerome Glisse.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24  * OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  * Authors: Dave Airlie
     27  *          Alex Deucher
     28  *          Jerome Glisse
     29  */
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.10 2020/02/14 14:34:59 maya Exp $");
     32 
     33 #include <linux/console.h>
     34 #include <linux/slab.h>
     35 #include <drm/drmP.h>
     36 #include <drm/drm_crtc_helper.h>
     37 #include <drm/radeon_drm.h>
     38 #include <linux/vgaarb.h>
     39 #include <linux/vga_switcheroo.h>
     40 #include <linux/efi.h>
     41 #include "radeon_reg.h"
     42 #include "radeon.h"
     43 #include "atom.h"
     44 
     45 #include <linux/nbsd-namespace.h>
     46 
     47 static const char radeon_family_name[][16] = {
     48 	"R100",
     49 	"RV100",
     50 	"RS100",
     51 	"RV200",
     52 	"RS200",
     53 	"R200",
     54 	"RV250",
     55 	"RS300",
     56 	"RV280",
     57 	"R300",
     58 	"R350",
     59 	"RV350",
     60 	"RV380",
     61 	"R420",
     62 	"R423",
     63 	"RV410",
     64 	"RS400",
     65 	"RS480",
     66 	"RS600",
     67 	"RS690",
     68 	"RS740",
     69 	"RV515",
     70 	"R520",
     71 	"RV530",
     72 	"RV560",
     73 	"RV570",
     74 	"R580",
     75 	"R600",
     76 	"RV610",
     77 	"RV630",
     78 	"RV670",
     79 	"RV620",
     80 	"RV635",
     81 	"RS780",
     82 	"RS880",
     83 	"RV770",
     84 	"RV730",
     85 	"RV710",
     86 	"RV740",
     87 	"CEDAR",
     88 	"REDWOOD",
     89 	"JUNIPER",
     90 	"CYPRESS",
     91 	"HEMLOCK",
     92 	"PALM",
     93 	"SUMO",
     94 	"SUMO2",
     95 	"BARTS",
     96 	"TURKS",
     97 	"CAICOS",
     98 	"CAYMAN",
     99 	"ARUBA",
    100 	"TAHITI",
    101 	"PITCAIRN",
    102 	"VERDE",
    103 	"OLAND",
    104 	"HAINAN",
    105 	"BONAIRE",
    106 	"KAVERI",
    107 	"KABINI",
    108 	"HAWAII",
    109 	"MULLINS",
    110 	"LAST",
    111 };
    112 
    113 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
    114 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
    115 
    116 struct radeon_px_quirk {
    117 	u32 chip_vendor;
    118 	u32 chip_device;
    119 	u32 subsys_vendor;
    120 	u32 subsys_device;
    121 	u32 px_quirk_flags;
    122 };
    123 
    124 static struct radeon_px_quirk radeon_px_quirk_list[] = {
    125 	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
    126 	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
    127 	 */
    128 	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
    129 	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
    130 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
    131 	 */
    132 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
    133 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    134 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
    135 	 */
    136 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    137 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    138 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
    139 	 */
    140 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    141 	/* macbook pro 8.2 */
    142 	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
    143 	{ 0, 0, 0, 0, 0 },
    144 };
    145 
    146 bool radeon_is_px(struct drm_device *dev)
    147 {
    148 	struct radeon_device *rdev = dev->dev_private;
    149 
    150 	if (rdev->flags & RADEON_IS_PX)
    151 		return true;
    152 	return false;
    153 }
    154 
    155 static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
    156 {
    157 	struct radeon_px_quirk *p = radeon_px_quirk_list;
    158 
    159 	/* Apply PX quirks */
    160 	while (p && p->chip_device != 0) {
    161 		if (rdev->pdev->vendor == p->chip_vendor &&
    162 		    rdev->pdev->device == p->chip_device &&
    163 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
    164 		    rdev->pdev->subsystem_device == p->subsys_device) {
    165 			rdev->px_quirk_flags = p->px_quirk_flags;
    166 			break;
    167 		}
    168 		++p;
    169 	}
    170 
    171 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
    172 		rdev->flags &= ~RADEON_IS_PX;
    173 }
    174 
    175 /**
    176  * radeon_program_register_sequence - program an array of registers.
    177  *
    178  * @rdev: radeon_device pointer
    179  * @registers: pointer to the register array
    180  * @array_size: size of the register array
    181  *
    182  * Programs an array of registers with AND and OR masks.
    183  * This is a helper for setting golden registers.
    184  */
    185 void radeon_program_register_sequence(struct radeon_device *rdev,
    186 				      const u32 *registers,
    187 				      const u32 array_size)
    188 {
    189 	u32 tmp, reg, and_mask, or_mask;
    190 	int i;
    191 
    192 	if (array_size % 3)
    193 		return;
    194 
    195 	for (i = 0; i < array_size; i += 3) {
    196 		reg = registers[i + 0];
    197 		and_mask = registers[i + 1];
    198 		or_mask = registers[i + 2];
    199 
    200 		if (and_mask == 0xffffffff) {
    201 			tmp = or_mask;
    202 		} else {
    203 			tmp = RREG32(reg);
    204 			tmp &= ~and_mask;
    205 			tmp |= or_mask;
    206 		}
    207 		WREG32(reg, tmp);
    208 	}
    209 }
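        /*
         * Illustrative sketch (not part of the driver): golden register tables
         * are flat arrays of (offset, and_mask, or_mask) triples, so array_size
         * must be a multiple of three.  A hypothetical table and call could
         * look like:
         *
         *	static const u32 example_golden_registers[] = {
         *		0x9a10, 0xffffffff, 0x00000082,
         *		0x8a14, 0xf000000f, 0x00000007,
         *	};
         *	radeon_program_register_sequence(rdev, example_golden_registers,
         *	    (const u32)ARRAY_SIZE(example_golden_registers));
         *
         * The first entry has and_mask == 0xffffffff, so or_mask is written
         * as-is; the second is a read-modify-write that clears the and_mask
         * bits before ORing in or_mask.
         */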
    210 
    211 void radeon_pci_config_reset(struct radeon_device *rdev)
    212 {
    213 	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
    214 }
    215 
    216 /**
    217  * radeon_surface_init - Clear GPU surface registers.
    218  *
    219  * @rdev: radeon_device pointer
    220  *
    221  * Clear GPU surface registers (r1xx-r5xx).
    222  */
    223 void radeon_surface_init(struct radeon_device *rdev)
    224 {
    225 	/* FIXME: check this out */
    226 	if (rdev->family < CHIP_R600) {
    227 		int i;
    228 
    229 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
    230 			if (rdev->surface_regs[i].bo)
    231 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
    232 			else
    233 				radeon_clear_surface_reg(rdev, i);
    234 		}
    235 		/* enable surfaces */
    236 		WREG32(RADEON_SURFACE_CNTL, 0);
    237 	}
    238 }
    239 
    240 /*
    241  * GPU scratch registers helpers function.
    242  */
    243 /**
    244  * radeon_scratch_init - Init scratch register driver information.
    245  *
    246  * @rdev: radeon_device pointer
    247  *
    248  * Init CP scratch register driver information (r1xx-r5xx)
    249  */
    250 void radeon_scratch_init(struct radeon_device *rdev)
    251 {
    252 	int i;
    253 
    254 	/* FIXME: check this out */
    255 	if (rdev->family < CHIP_R300) {
    256 		rdev->scratch.num_reg = 5;
    257 	} else {
    258 		rdev->scratch.num_reg = 7;
    259 	}
    260 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    261 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    262 		rdev->scratch.free[i] = true;
    263 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    264 	}
    265 }
    266 
    267 /**
    268  * radeon_scratch_get - Allocate a scratch register
    269  *
    270  * @rdev: radeon_device pointer
    271  * @reg: scratch register mmio offset
    272  *
    273  * Allocate a CP scratch register for use by the driver (all asics).
    274  * Returns 0 on success or -EINVAL on failure.
    275  */
    276 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
    277 {
    278 	int i;
    279 
    280 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    281 		if (rdev->scratch.free[i]) {
    282 			rdev->scratch.free[i] = false;
    283 			*reg = rdev->scratch.reg[i];
    284 			return 0;
    285 		}
    286 	}
    287 	return -EINVAL;
    288 }
    289 
    290 /**
    291  * radeon_scratch_free - Free a scratch register
    292  *
    293  * @rdev: radeon_device pointer
    294  * @reg: scratch register mmio offset
    295  *
    296  * Free a CP scratch register allocated for use by the driver (all asics)
    297  */
    298 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
    299 {
    300 	int i;
    301 
    302 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    303 		if (rdev->scratch.reg[i] == reg) {
    304 			rdev->scratch.free[i] = true;
    305 			return;
    306 		}
    307 	}
    308 }
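        /*
         * Usage sketch (illustrative only): the ring and IB tests allocate a
         * scratch register, have the GPU write a known value into it and poll
         * for it, e.g.:
         *
         *	uint32_t scratch;
         *	if (radeon_scratch_get(rdev, &scratch) == 0) {
         *		WREG32(scratch, 0xCAFEDEAD);
         *		...
         *		radeon_scratch_free(rdev, scratch);
         *	}
         */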
    309 
    310 /*
    311  * GPU doorbell aperture helpers function.
    312  */
    313 /**
    314  * radeon_doorbell_init - Init doorbell driver information.
    315  *
    316  * @rdev: radeon_device pointer
    317  *
    318  * Init doorbell driver information (CIK)
    319  * Returns 0 on success, error on failure.
    320  */
    321 static int radeon_doorbell_init(struct radeon_device *rdev)
    322 {
    323 #ifdef __NetBSD__
    324 	int r;
    325 #endif
    326 
    327 	/* doorbell bar mapping */
    328 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
    329 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
    330 
    331 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
    332 	if (rdev->doorbell.num_doorbells == 0)
    333 		return -EINVAL;
    334 
    335 #ifdef __NetBSD__
    336 	/* XXX errno NetBSD->Linux */
    337 	rdev->doorbell.bst = rdev->pdev->pd_pa.pa_memt;
    338 	r = -bus_space_map(rdev->doorbell.bst, rdev->doorbell.base,
    339 	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)),
    340 	    0, &rdev->doorbell.bsh);
    341 	if (r)
    342 		return r;
    343 #else
    344 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
    345 	if (rdev->doorbell.ptr == NULL) {
    346 		return -ENOMEM;
    347 	}
    348 #endif
    349 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
    350 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
    351 
    352 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
    353 
    354 	return 0;
    355 }
    356 
    357 /**
    358  * radeon_doorbell_fini - Tear down doorbell driver information.
    359  *
    360  * @rdev: radeon_device pointer
    361  *
    362  * Tear down doorbell driver information (CIK)
    363  */
    364 static void radeon_doorbell_fini(struct radeon_device *rdev)
    365 {
    366 #ifdef __NetBSD__
    367 	bus_space_unmap(rdev->doorbell.bst, rdev->doorbell.bsh,
    368 	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)));
    369 #else
    370 	iounmap(rdev->doorbell.ptr);
    371 	rdev->doorbell.ptr = NULL;
    372 #endif
    373 }
    374 
    375 /**
    376  * radeon_doorbell_get - Allocate a doorbell entry
    377  *
    378  * @rdev: radeon_device pointer
    379  * @doorbell: doorbell index
    380  *
    381  * Allocate a doorbell for use by the driver (all asics).
    382  * Returns 0 on success or -EINVAL on failure.
    383  */
    384 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
    385 {
    386 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
    387 	if (offset < rdev->doorbell.num_doorbells) {
    388 		__set_bit(offset, rdev->doorbell.used);
    389 		*doorbell = offset;
    390 		return 0;
    391 	} else {
    392 		return -EINVAL;
    393 	}
    394 }
    395 
    396 /**
    397  * radeon_doorbell_free - Free a doorbell entry
    398  *
    399  * @rdev: radeon_device pointer
    400  * @doorbell: doorbell index
    401  *
    402  * Free a doorbell allocated for use by the driver (all asics)
    403  */
    404 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
    405 {
    406 	if (doorbell < rdev->doorbell.num_doorbells)
    407 		__clear_bit(doorbell, rdev->doorbell.used);
    408 }
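        /*
         * Usage sketch (illustrative only): compute-queue setup would typically
         * reserve a doorbell index for a ring and release it on teardown, along
         * the lines of:
         *
         *	u32 index;
         *	if (radeon_doorbell_get(rdev, &index) == 0) {
         *		ring->doorbell_index = index;
         *		...
         *		radeon_doorbell_free(rdev, index);
         *	}
         */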
    409 
    410 /**
    411  * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
    412  *                                setup KFD
    413  *
    414  * @rdev: radeon_device pointer
    415  * @aperture_base: output returning doorbell aperture base physical address
    416  * @aperture_size: output returning doorbell aperture size in bytes
    417  * @start_offset: output returning # of doorbell bytes reserved for radeon.
    418  *
    419  * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
    420  * takes doorbells required for its own rings and reports the setup to KFD.
    421  * Radeon reserved doorbells are at the start of the doorbell aperture.
    422  */
    423 void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
    424 				  phys_addr_t *aperture_base,
    425 				  size_t *aperture_size,
    426 				  size_t *start_offset)
    427 {
    428 	/* The first num_doorbells are used by radeon.
    429 	 * KFD takes whatever's left in the aperture. */
    430 	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
    431 		*aperture_base = rdev->doorbell.base;
    432 		*aperture_size = rdev->doorbell.size;
    433 		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
    434 	} else {
    435 		*aperture_base = 0;
    436 		*aperture_size = 0;
    437 		*start_offset = 0;
    438 	}
    439 }
    440 
    441 /*
    442  * radeon_wb_*()
    443  * Writeback is the method by which the GPU updates special pages
    444  * in memory with the status of certain GPU events (fences, ring pointers,
    445  * etc.).
    446  */
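        /*
         * Illustrative sketch (not part of this file): with writeback enabled, a
         * ring's read pointer can be fetched from the writeback page instead of
         * an MMIO read, roughly:
         *
         *	if (rdev->wb.enabled)
         *		rptr = rdev->wb.wb[ring->rptr_offs/4];
         *	else
         *		rptr = RREG32(RADEON_CP_RB_RPTR);
         *
         * which is the pattern the per-ASIC get_rptr() callbacks follow.
         */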
    447 
    448 /**
    449  * radeon_wb_disable - Disable Writeback
    450  *
    451  * @rdev: radeon_device pointer
    452  *
    453  * Disables Writeback (all asics).  Used for suspend.
    454  */
    455 void radeon_wb_disable(struct radeon_device *rdev)
    456 {
    457 	rdev->wb.enabled = false;
    458 }
    459 
    460 /**
    461  * radeon_wb_fini - Disable Writeback and free memory
    462  *
    463  * @rdev: radeon_device pointer
    464  *
    465  * Disables Writeback and frees the Writeback memory (all asics).
    466  * Used at driver shutdown.
    467  */
    468 void radeon_wb_fini(struct radeon_device *rdev)
    469 {
    470 	radeon_wb_disable(rdev);
    471 	if (rdev->wb.wb_obj) {
    472 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
    473 			radeon_bo_kunmap(rdev->wb.wb_obj);
    474 			radeon_bo_unpin(rdev->wb.wb_obj);
    475 			radeon_bo_unreserve(rdev->wb.wb_obj);
    476 		}
    477 		radeon_bo_unref(&rdev->wb.wb_obj);
    478 		rdev->wb.wb = NULL;
    479 		rdev->wb.wb_obj = NULL;
    480 	}
    481 }
    482 
    483 /**
    484  * radeon_wb_init - Init Writeback driver info and allocate memory
    485  *
    486  * @rdev: radeon_device pointer
    487  *
    488  * Initializes Writeback and allocates the Writeback memory (all asics).
    489  * Used at driver startup.
    490  * Returns 0 on success or a negative error code on failure.
    491  */
    492 int radeon_wb_init(struct radeon_device *rdev)
    493 {
    494 	int r;
    495 
    496 	if (rdev->wb.wb_obj == NULL) {
    497 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
    498 				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
    499 				     &rdev->wb.wb_obj);
    500 		if (r) {
    501 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
    502 			return r;
    503 		}
    504 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
    505 		if (unlikely(r != 0)) {
    506 			radeon_wb_fini(rdev);
    507 			return r;
    508 		}
    509 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
    510 				&rdev->wb.gpu_addr);
    511 		if (r) {
    512 			radeon_bo_unreserve(rdev->wb.wb_obj);
    513 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
    514 			radeon_wb_fini(rdev);
    515 			return r;
    516 		}
    517 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)__UNVOLATILE(&rdev->wb.wb));
    518 		radeon_bo_unreserve(rdev->wb.wb_obj);
    519 		if (r) {
    520 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
    521 			radeon_wb_fini(rdev);
    522 			return r;
    523 		}
    524 	}
    525 
    526 	/* clear wb memory */
    527 	memset(__UNVOLATILE(rdev->wb.wb), 0, RADEON_GPU_PAGE_SIZE);
    528 	/* disable event_write fences */
    529 	rdev->wb.use_event = false;
    530 	/* disabled via module param */
    531 	if (radeon_no_wb == 1) {
    532 		rdev->wb.enabled = false;
    533 	} else {
    534 		if (rdev->flags & RADEON_IS_AGP) {
    535 			/* often unreliable on AGP */
    536 			rdev->wb.enabled = false;
    537 		} else if (rdev->family < CHIP_R300) {
    538 			/* often unreliable on pre-r300 */
    539 			rdev->wb.enabled = false;
    540 		} else {
    541 			rdev->wb.enabled = true;
    542 			/* event_write fences are only available on r600+ */
    543 			if (rdev->family >= CHIP_R600) {
    544 				rdev->wb.use_event = true;
    545 			}
    546 		}
    547 	}
    548 	/* always use writeback/events on NI, APUs */
    549 	if (rdev->family >= CHIP_PALM) {
    550 		rdev->wb.enabled = true;
    551 		rdev->wb.use_event = true;
    552 	}
    553 
    554 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
    555 
    556 	return 0;
    557 }
    558 
    559 /**
    560  * radeon_vram_location - try to find VRAM location
    561  * @rdev: radeon device structure holding all necessary information
    562  * @mc: memory controller structure holding memory information
    563  * @base: base address at which to put VRAM
    564  *
    565  * Function will try to place VRAM at the base address provided
    566  * as parameter (which is so far either PCI aperture address or
    567  * for IGP TOM base address).
    568  *
    569  * If there is not enough space to fit the invisible VRAM in the 32-bit
    570  * address space then we limit the VRAM size to the aperture.
    571  *
    572  * If we are using AGP and if the AGP aperture doesn't allow us to have
    573  * room for all the VRAM then we restrict the VRAM to the PCI aperture
    574  * size and print a warning.
    575  *
    576  * This function never fails; the worst case is limiting VRAM.
    577  *
    578  * Note: GTT start, end, size should be initialized before calling this
    579  * function on AGP platform.
    580  *
    581  * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
    582  * this shouldn't be a problem as we are using the PCI aperture as a reference.
    583  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
    584  * not IGP.
    585  *
    586  * Note: we use mc_vram_size as on some boards we need to program the mc to
    587  * cover the whole aperture even if VRAM size is smaller than the aperture size
    588  * (Novell bug 204882 along with lots of Ubuntu ones).
    589  *
    590  * Note: when limiting vram it's safe to overwrite real_vram_size because
    591  * we are not in the case where real_vram_size is smaller than mc_vram_size
    592  * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots of
    593  * Ubuntu ones)
    594  *
    595  * Note: IGP TOM addr should be the same as the aperture addr, we don't
    596  * explicitly check for that, though.
    597  *
    598  * FIXME: when reducing VRAM size align new size on power of 2.
    599  */
    600 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
    601 {
    602 	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
    603 
    604 	mc->vram_start = base;
    605 	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
    606 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    607 		mc->real_vram_size = mc->aper_size;
    608 		mc->mc_vram_size = mc->aper_size;
    609 	}
    610 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    611 	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
    612 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    613 		mc->real_vram_size = mc->aper_size;
    614 		mc->mc_vram_size = mc->aper_size;
    615 	}
    616 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    617 	if (limit && limit < mc->real_vram_size)
    618 		mc->real_vram_size = limit;
    619 	dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64" (%"PRIu64"M used)\n",
    620 			mc->mc_vram_size >> 20, mc->vram_start,
    621 			mc->vram_end, mc->real_vram_size >> 20);
    622 }
    623 
    624 /**
    625  * radeon_gtt_location - try to find GTT location
    626  * @rdev: radeon device structure holding all necessary information
    627  * @mc: memory controller structure holding memory information
    628  *
    629  * Function will try to place GTT before or after VRAM.
    630  *
    631  * If the GTT size is bigger than the space left then we adjust the GTT size.
    632  * Thus this function never fails.
    633  *
    634  * FIXME: when reducing GTT size align new size on power of 2.
    635  */
    636 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
    637 {
    638 	u64 size_af, size_bf;
    639 
    640 	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
    641 	size_bf = mc->vram_start & ~mc->gtt_base_align;
    642 	if (size_bf > size_af) {
    643 		if (mc->gtt_size > size_bf) {
    644 			dev_warn(rdev->dev, "limiting GTT\n");
    645 			mc->gtt_size = size_bf;
    646 		}
    647 		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
    648 	} else {
    649 		if (mc->gtt_size > size_af) {
    650 			dev_warn(rdev->dev, "limiting GTT\n");
    651 			mc->gtt_size = size_af;
    652 		}
    653 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
    654 	}
    655 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
    656 	dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64"\n",
    657 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
    658 }
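        /*
         * Worked example (hypothetical numbers): with 1024M of VRAM placed at MC
         * address 0 and a 512M GTT, the two helpers above end up with a layout
         * like
         *
         *	VRAM: 1024M 0x0000000000000000 - 0x000000003FFFFFFF
         *	GTT:   512M 0x0000000040000000 - 0x000000005FFFFFFF
         *
         * i.e. the GTT is placed right after VRAM because there is no room
         * before vram_start in this case.
         */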
    659 
    660 /*
    661  * GPU helpers function.
    662  */
    663 
    664 /**
    665  * radeon_device_is_virtual - check if we are running in a virtual environment
    666  *
    667  * Check if the asic has been passed through to a VM (all asics).
    668  * Used at driver startup.
    669  * Returns true if virtual or false if not.
    670  */
    671 static bool radeon_device_is_virtual(void)
    672 {
    673 #ifdef CONFIG_X86
    674 #ifdef __NetBSD__		/* XXX virtualization */
    675 	return false;
    676 #else
    677 	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
    678 #endif
    679 #else
    680 	return false;
    681 #endif
    682 }
    683 
    684 /**
    685  * radeon_card_posted - check if the hw has already been initialized
    686  *
    687  * @rdev: radeon_device pointer
    688  *
    689  * Check if the asic has been initialized (all asics).
    690  * Used at driver startup.
    691  * Returns true if initialized or false if not.
    692  */
    693 bool radeon_card_posted(struct radeon_device *rdev)
    694 {
    695 	uint32_t reg;
    696 
    697 	/* for pass through, always force asic_init for CI */
    698 	if (rdev->family >= CHIP_BONAIRE &&
    699 	    radeon_device_is_virtual())
    700 		return false;
    701 
    702 #ifndef __NetBSD__		/* XXX radeon efi */
    703 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
    704 	if (efi_enabled(EFI_BOOT) &&
    705 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
    706 	    (rdev->family < CHIP_R600))
    707 		return false;
    708 #endif
    709 
    710 	if (ASIC_IS_NODCE(rdev))
    711 		goto check_memsize;
    712 
    713 	/* first check CRTCs */
    714 	if (ASIC_IS_DCE4(rdev)) {
    715 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
    716 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
    717 			if (rdev->num_crtc >= 4) {
    718 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
    719 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
    720 			}
    721 			if (rdev->num_crtc >= 6) {
    722 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
    723 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
    724 			}
    725 		if (reg & EVERGREEN_CRTC_MASTER_EN)
    726 			return true;
    727 	} else if (ASIC_IS_AVIVO(rdev)) {
    728 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
    729 		      RREG32(AVIVO_D2CRTC_CONTROL);
    730 		if (reg & AVIVO_CRTC_EN) {
    731 			return true;
    732 		}
    733 	} else {
    734 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
    735 		      RREG32(RADEON_CRTC2_GEN_CNTL);
    736 		if (reg & RADEON_CRTC_EN) {
    737 			return true;
    738 		}
    739 	}
    740 
    741 check_memsize:
    742 	/* then check MEM_SIZE, in case the crtcs are off */
    743 	if (rdev->family >= CHIP_R600)
    744 		reg = RREG32(R600_CONFIG_MEMSIZE);
    745 	else
    746 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
    747 
    748 	if (reg)
    749 		return true;
    750 
    751 	return false;
    752 
    753 }
    754 
    755 /**
    756  * radeon_update_bandwidth_info - update display bandwidth params
    757  *
    758  * @rdev: radeon_device pointer
    759  *
    760  * Used when sclk/mclk are switched or display modes are set.
    761  * params are used to calculate display watermarks (all asics)
    762  */
    763 void radeon_update_bandwidth_info(struct radeon_device *rdev)
    764 {
    765 	fixed20_12 a;
    766 	u32 sclk = rdev->pm.current_sclk;
    767 	u32 mclk = rdev->pm.current_mclk;
    768 
    769 	/* sclk/mclk in Mhz */
    770 	a.full = dfixed_const(100);
    771 	rdev->pm.sclk.full = dfixed_const(sclk);
    772 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
    773 	rdev->pm.mclk.full = dfixed_const(mclk);
    774 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
    775 
    776 	if (rdev->flags & RADEON_IS_IGP) {
    777 		a.full = dfixed_const(16);
    778 		/* core_bandwidth = sclk(Mhz) * 16 */
    779 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
    780 	}
    781 }
    782 
    783 /**
    784  * radeon_boot_test_post_card - check and possibly initialize the hw
    785  *
    786  * @rdev: radeon_device pointer
    787  *
    788  * Check if the asic is initialized and if not, attempt to initialize
    789  * it (all asics).
    790  * Returns true if initialized or false if not.
    791  */
    792 bool radeon_boot_test_post_card(struct radeon_device *rdev)
    793 {
    794 	if (radeon_card_posted(rdev))
    795 		return true;
    796 
    797 	if (rdev->bios) {
    798 		DRM_INFO("GPU not posted. posting now...\n");
    799 		if (rdev->is_atom_bios)
    800 			atom_asic_init(rdev->mode_info.atom_context);
    801 		else
    802 			radeon_combios_asic_init(rdev->ddev);
    803 		return true;
    804 	} else {
    805 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
    806 		return false;
    807 	}
    808 }
    809 
    810 /**
    811  * radeon_dummy_page_init - init dummy page used by the driver
    812  *
    813  * @rdev: radeon_device pointer
    814  *
    815  * Allocate the dummy page used by the driver (all asics).
    816  * This dummy page is used by the driver as a filler for gart entries
    817  * when pages are taken out of the GART.
    818  * Returns 0 on success, -ENOMEM on failure.
    819  */
    820 int radeon_dummy_page_init(struct radeon_device *rdev)
    821 {
    822 #ifdef __NetBSD__
    823 	int rsegs;
    824 	int error;
    825 
    826 	/* XXX Can this be called more than once??  */
    827 	if (rdev->dummy_page.rdp_map != NULL)
    828 		return 0;
    829 
    830 	error = bus_dmamem_alloc(rdev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
    831 	    &rdev->dummy_page.rdp_seg, 1, &rsegs, BUS_DMA_WAITOK);
    832 	if (error)
    833 		goto fail0;
    834 	KASSERT(rsegs == 1);
    835 	error = bus_dmamap_create(rdev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
    836 	    BUS_DMA_WAITOK, &rdev->dummy_page.rdp_map);
    837 	if (error)
    838 		goto fail1;
    839 	error = bus_dmamem_map(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1,
    840 	    PAGE_SIZE, &rdev->dummy_page.rdp_addr,
    841 	    BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
    842 	if (error)
    843 		goto fail2;
    844 	error = bus_dmamap_load(rdev->ddev->dmat, rdev->dummy_page.rdp_map,
    845 	    rdev->dummy_page.rdp_addr, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
    846 	if (error)
    847 		goto fail3;
    848 
    849 	memset(rdev->dummy_page.rdp_addr, 0, PAGE_SIZE);
    850 
    851 	/* Success!  */
    852 	rdev->dummy_page.addr = rdev->dummy_page.rdp_map->dm_segs[0].ds_addr;
    853 	rdev->dummy_page.entry = radeon_gart_get_page_entry(
    854 		rdev->dummy_page.addr, RADEON_GART_PAGE_DUMMY);
    855 	return 0;
    856 
    857 fail4: __unused
    858 	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    859 fail3:	bus_dmamem_unmap(rdev->ddev->dmat, rdev->dummy_page.rdp_addr,
    860 	    PAGE_SIZE);
    861 fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    862 fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
    863 fail0:	KASSERT(error);
    864 	rdev->dummy_page.rdp_map = NULL;
    865 	/* XXX errno NetBSD->Linux */
    866 	return -error;
    867 #else
    868 	if (rdev->dummy_page.page)
    869 		return 0;
    870 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
    871 	if (rdev->dummy_page.page == NULL)
    872 		return -ENOMEM;
    873 	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
    874 					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    875 	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
    876 		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
    877 		__free_page(rdev->dummy_page.page);
    878 		rdev->dummy_page.page = NULL;
    879 		return -ENOMEM;
    880 	}
    881 	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
    882 							    RADEON_GART_PAGE_DUMMY);
    883 	return 0;
    884 #endif
    885 }
    886 
    887 /**
    888  * radeon_dummy_page_fini - free dummy page used by the driver
    889  *
    890  * @rdev: radeon_device pointer
    891  *
    892  * Frees the dummy page used by the driver (all asics).
    893  */
    894 void radeon_dummy_page_fini(struct radeon_device *rdev)
    895 {
    896 #ifdef __NetBSD__
    897 
    898 	if (rdev->dummy_page.rdp_map == NULL)
    899 		return;
    900 	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    901 	bus_dmamem_unmap(rdev->ddev->dmat, rdev->dummy_page.rdp_addr,
    902 	    PAGE_SIZE);
    903 	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    904 	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
    905 	rdev->dummy_page.rdp_map = NULL;
    906 #else
    907 	if (rdev->dummy_page.page == NULL)
    908 		return;
    909 	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
    910 			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    911 	__free_page(rdev->dummy_page.page);
    912 	rdev->dummy_page.page = NULL;
    913 #endif
    914 }
    915 
    916 
    917 /* ATOM accessor methods */
    918 /*
    919  * ATOM is an interpreted byte code stored in tables in the vbios.  The
    920  * driver registers callbacks to access registers and the interpreter
    921  * in the driver parses the tables and executes them to program specific
    922  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
    923  * atombios.h, and atom.c
    924  */
    925 
    926 /**
    927  * cail_pll_read - read PLL register
    928  *
    929  * @info: atom card_info pointer
    930  * @reg: PLL register offset
    931  *
    932  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    933  * Returns the value of the PLL register.
    934  */
    935 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
    936 {
    937 	struct radeon_device *rdev = info->dev->dev_private;
    938 	uint32_t r;
    939 
    940 	r = rdev->pll_rreg(rdev, reg);
    941 	return r;
    942 }
    943 
    944 /**
    945  * cail_pll_write - write PLL register
    946  *
    947  * @info: atom card_info pointer
    948  * @reg: PLL register offset
    949  * @val: value to write to the pll register
    950  *
    951  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    952  */
    953 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
    954 {
    955 	struct radeon_device *rdev = info->dev->dev_private;
    956 
    957 	rdev->pll_wreg(rdev, reg, val);
    958 }
    959 
    960 /**
    961  * cail_mc_read - read MC (Memory Controller) register
    962  *
    963  * @info: atom card_info pointer
    964  * @reg: MC register offset
    965  *
    966  * Provides an MC register accessor for the atom interpreter (r4xx+).
    967  * Returns the value of the MC register.
    968  */
    969 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
    970 {
    971 	struct radeon_device *rdev = info->dev->dev_private;
    972 	uint32_t r;
    973 
    974 	r = rdev->mc_rreg(rdev, reg);
    975 	return r;
    976 }
    977 
    978 /**
    979  * cail_mc_write - write MC (Memory Controller) register
    980  *
    981  * @info: atom card_info pointer
    982  * @reg: MC register offset
    983  * @val: value to write to the MC register
    984  *
    985  * Provides an MC register accessor for the atom interpreter (r4xx+).
    986  */
    987 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
    988 {
    989 	struct radeon_device *rdev = info->dev->dev_private;
    990 
    991 	rdev->mc_wreg(rdev, reg, val);
    992 }
    993 
    994 /**
    995  * cail_reg_write - write MMIO register
    996  *
    997  * @info: atom card_info pointer
    998  * @reg: MMIO register offset
    999  * @val: value to write to the MMIO register
   1000  *
   1001  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
   1002  */
   1003 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
   1004 {
   1005 	struct radeon_device *rdev = info->dev->dev_private;
   1006 
   1007 	WREG32(reg*4, val);
   1008 }
   1009 
   1010 /**
   1011  * cail_reg_read - read MMIO register
   1012  *
   1013  * @info: atom card_info pointer
   1014  * @reg: MMIO register offset
   1015  *
   1016  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
   1017  * Returns the value of the MMIO register.
   1018  */
   1019 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
   1020 {
   1021 	struct radeon_device *rdev = info->dev->dev_private;
   1022 	uint32_t r;
   1023 
   1024 	r = RREG32(reg*4);
   1025 	return r;
   1026 }
   1027 
   1028 /**
   1029  * cail_ioreg_write - write IO register
   1030  *
   1031  * @info: atom card_info pointer
   1032  * @reg: IO register offset
   1033  * @val: value to write to the IO register
   1034  *
   1035  * Provides an IO register accessor for the atom interpreter (r4xx+).
   1036  */
   1037 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
   1038 {
   1039 	struct radeon_device *rdev = info->dev->dev_private;
   1040 
   1041 	WREG32_IO(reg*4, val);
   1042 }
   1043 
   1044 /**
   1045  * cail_ioreg_read - read IO register
   1046  *
   1047  * @info: atom card_info pointer
   1048  * @reg: IO register offset
   1049  *
   1050  * Provides an IO register accessor for the atom interpreter (r4xx+).
   1051  * Returns the value of the IO register.
   1052  */
   1053 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
   1054 {
   1055 	struct radeon_device *rdev = info->dev->dev_private;
   1056 	uint32_t r;
   1057 
   1058 	r = RREG32_IO(reg*4);
   1059 	return r;
   1060 }
   1061 
   1062 /**
   1063  * radeon_atombios_init - init the driver info and callbacks for atombios
   1064  *
   1065  * @rdev: radeon_device pointer
   1066  *
   1067  * Initializes the driver info and register access callbacks for the
   1068  * ATOM interpreter (r4xx+).
   1069  * Returns 0 on success, -ENOMEM on failure.
   1070  * Called at driver startup.
   1071  */
   1072 int radeon_atombios_init(struct radeon_device *rdev)
   1073 {
   1074 	struct card_info *atom_card_info =
   1075 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
   1076 
   1077 	if (!atom_card_info)
   1078 		return -ENOMEM;
   1079 
   1080 	rdev->mode_info.atom_card_info = atom_card_info;
   1081 	atom_card_info->dev = rdev->ddev;
   1082 	atom_card_info->reg_read = cail_reg_read;
   1083 	atom_card_info->reg_write = cail_reg_write;
   1084 	/* needed for iio ops */
   1085 #ifdef __NetBSD__
   1086 	if (rdev->rio_mem_size)
   1087 #else
   1088 	if (rdev->rio_mem)
   1089 #endif
   1090 	{
   1091 		atom_card_info->ioreg_read = cail_ioreg_read;
   1092 		atom_card_info->ioreg_write = cail_ioreg_write;
   1093 	} else {
   1094 		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
   1095 		atom_card_info->ioreg_read = cail_reg_read;
   1096 		atom_card_info->ioreg_write = cail_reg_write;
   1097 	}
   1098 	atom_card_info->mc_read = cail_mc_read;
   1099 	atom_card_info->mc_write = cail_mc_write;
   1100 	atom_card_info->pll_read = cail_pll_read;
   1101 	atom_card_info->pll_write = cail_pll_write;
   1102 
   1103 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
   1104 	if (!rdev->mode_info.atom_context) {
   1105 		radeon_atombios_fini(rdev);
   1106 		return -ENOMEM;
   1107 	}
   1108 
   1109 	mutex_init(&rdev->mode_info.atom_context->mutex);
   1110 	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
   1111 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
   1112 	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
   1113 	return 0;
   1114 }
   1115 
   1116 /**
   1117  * radeon_atombios_fini - free the driver info and callbacks for atombios
   1118  *
   1119  * @rdev: radeon_device pointer
   1120  *
   1121  * Frees the driver info and register access callbacks for the ATOM
   1122  * interpreter (r4xx+).
   1123  * Called at driver shutdown.
   1124  */
   1125 void radeon_atombios_fini(struct radeon_device *rdev)
   1126 {
   1127 	if (rdev->mode_info.atom_context) {
   1128 		mutex_destroy(&rdev->mode_info.atom_context->scratch_mutex);
   1129 		mutex_destroy(&rdev->mode_info.atom_context->mutex);
   1130 		kfree(rdev->mode_info.atom_context->scratch);
   1131 	}
   1132 	kfree(rdev->mode_info.atom_context);
   1133 	rdev->mode_info.atom_context = NULL;
   1134 	kfree(rdev->mode_info.atom_card_info);
   1135 	rdev->mode_info.atom_card_info = NULL;
   1136 }
   1137 
   1138 /* COMBIOS */
   1139 /*
   1140  * COMBIOS is the bios format prior to ATOM. It provides
   1141  * command tables similar to ATOM, but doesn't have a unified
   1142  * parser.  See radeon_combios.c
   1143  */
   1144 
   1145 /**
   1146  * radeon_combios_init - init the driver info for combios
   1147  *
   1148  * @rdev: radeon_device pointer
   1149  *
   1150  * Initializes the driver info for combios (r1xx-r3xx).
   1151  * Returns 0 on success.
   1152  * Called at driver startup.
   1153  */
   1154 int radeon_combios_init(struct radeon_device *rdev)
   1155 {
   1156 	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
   1157 	return 0;
   1158 }
   1159 
   1160 /**
   1161  * radeon_combios_fini - free the driver info for combios
   1162  *
   1163  * @rdev: radeon_device pointer
   1164  *
   1165  * Frees the driver info for combios (r1xx-r3xx).
   1166  * Called at driver shutdown.
   1167  */
   1168 void radeon_combios_fini(struct radeon_device *rdev)
   1169 {
   1170 }
   1171 
   1172 #ifndef __NetBSD__		/* XXX radeon vga */
   1173 /* if we get transitioned to only one device, take VGA back */
   1174 /**
   1175  * radeon_vga_set_decode - enable/disable vga decode
   1176  *
   1177  * @cookie: radeon_device pointer
   1178  * @state: enable/disable vga decode
   1179  *
   1180  * Enable/disable vga decode (all asics).
   1181  * Returns VGA resource flags.
   1182  */
   1183 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
   1184 {
   1185 	struct radeon_device *rdev = cookie;
   1186 	radeon_vga_set_state(rdev, state);
   1187 	if (state)
   1188 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
   1189 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1190 	else
   1191 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1192 }
   1193 #endif
   1194 
   1195 /**
   1196  * radeon_check_pot_argument - check that argument is a power of two
   1197  *
   1198  * @arg: value to check
   1199  *
   1200  * Validates that a certain argument is a power of two (all asics).
   1201  * Returns true if argument is valid.
   1202  */
   1203 static bool radeon_check_pot_argument(int arg)
   1204 {
   1205 	return (arg & (arg - 1)) == 0;
   1206 }
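        /*
         * For example, 512 (0x200) passes since 0x200 & 0x1ff == 0, while 384
         * (0x180) fails since 0x180 & 0x17f == 0x100.  Note that 0 also passes
         * this check, which is fine for the "no limit" defaults used below.
         */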
   1207 
   1208 /**
   1209  * radeon_gart_size_auto - determine a sensible default GART size (in MB)
   1210  *
   1211  * @family: ASIC family name
   1212  */
   1213 static int radeon_gart_size_auto(enum radeon_family family)
   1214 {
   1215 	/* default to a larger gart size on newer asics */
   1216 	if (family >= CHIP_TAHITI)
   1217 		return 2048;
   1218 	else if (family >= CHIP_RV770)
   1219 		return 1024;
   1220 	else
   1221 		return 512;
   1222 }
   1223 
   1224 /**
   1225  * radeon_check_arguments - validate module params
   1226  *
   1227  * @rdev: radeon_device pointer
   1228  *
   1229  * Validates certain module parameters and updates
   1230  * the associated values used by the driver (all asics).
   1231  */
   1232 static void radeon_check_arguments(struct radeon_device *rdev)
   1233 {
   1234 	/* vramlimit must be a power of two */
   1235 	if (!radeon_check_pot_argument(radeon_vram_limit)) {
   1236 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
   1237 				radeon_vram_limit);
   1238 		radeon_vram_limit = 0;
   1239 	}
   1240 
   1241 	if (radeon_gart_size == -1) {
   1242 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1243 	}
   1244 	/* gtt size must be a power of two and greater than or equal to 32M */
   1245 	if (radeon_gart_size < 32) {
   1246 		dev_warn(rdev->dev, "gart size (%d) too small\n",
   1247 				radeon_gart_size);
   1248 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1249 	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
   1250 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
   1251 				radeon_gart_size);
   1252 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1253 	}
   1254 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
   1255 
   1256 	/* AGP mode can only be -1, 1, 2, 4, 8 */
   1257 	switch (radeon_agpmode) {
   1258 	case -1:
   1259 	case 0:
   1260 	case 1:
   1261 	case 2:
   1262 	case 4:
   1263 	case 8:
   1264 		break;
   1265 	default:
   1266 		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
   1267 				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
   1268 		radeon_agpmode = 0;
   1269 		break;
   1270 	}
   1271 
   1272 	if (!radeon_check_pot_argument(radeon_vm_size)) {
   1273 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
   1274 			 radeon_vm_size);
   1275 		radeon_vm_size = 4;
   1276 	}
   1277 
   1278 	if (radeon_vm_size < 1) {
   1279 		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
   1280 			 radeon_vm_size);
   1281 		radeon_vm_size = 4;
   1282 	}
   1283 
   1284 	/*
   1285 	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
   1286 	 */
   1287 	if (radeon_vm_size > 1024) {
   1288 		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
   1289 			 radeon_vm_size);
   1290 		radeon_vm_size = 4;
   1291 	}
   1292 
   1293 	/* defines number of bits in page table versus page directory,
   1294 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
   1295 	 * page table and the remaining bits are in the page directory */
   1296 	if (radeon_vm_block_size == -1) {
   1297 
   1298 		/* Total bits covered by PD + PTs */
   1299 		unsigned bits = ilog2(radeon_vm_size) + 18;
   1300 
   1301 		/* Make sure the PD is 4K in size up to 8GB address space.
   1302 		   Above that, split equally between PD and PTs. */
   1303 		if (radeon_vm_size <= 8)
   1304 			radeon_vm_block_size = bits - 9;
   1305 		else
   1306 			radeon_vm_block_size = (bits + 3) / 2;
   1307 
   1308 	} else if (radeon_vm_block_size < 9) {
   1309 		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
   1310 			 radeon_vm_block_size);
   1311 		radeon_vm_block_size = 9;
   1312 	}
   1313 
   1314 	if (radeon_vm_block_size > 24 ||
   1315 	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
   1316 		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
   1317 			 radeon_vm_block_size);
   1318 		radeon_vm_block_size = 9;
   1319 	}
   1320 }
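        /*
         * Worked example for the block-size heuristic above (assuming the usual
         * 8 byte page directory entries): with radeon_vm_size = 8 (GB),
         * bits = ilog2(8) + 18 = 21 and the VM is <= 8GB, so
         * radeon_vm_block_size = 21 - 9 = 12, leaving 21 - 12 = 9 bits of page
         * directory, i.e. 512 PDEs * 8 bytes = a 4K page directory.
         */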
   1321 
   1322 #ifndef __NetBSD__		/* XXX radeon vga */
   1323 /**
   1324  * radeon_switcheroo_set_state - set switcheroo state
   1325  *
   1326  * @pdev: pci dev pointer
   1327  * @state: vga_switcheroo state
   1328  *
   1329  * Callback for the switcheroo driver.  Suspends or resumes the
   1330  * the asics before or after it is powered up using ACPI methods.
   1331  */
   1332 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
   1333 {
   1334 	struct drm_device *dev = pci_get_drvdata(pdev);
   1335 	struct radeon_device *rdev = dev->dev_private;
   1336 
   1337 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
   1338 		return;
   1339 
   1340 	if (state == VGA_SWITCHEROO_ON) {
   1341 		unsigned d3_delay = dev->pdev->d3_delay;
   1342 
   1343 		printk(KERN_INFO "radeon: switched on\n");
   1344 		/* don't suspend or resume card normally */
   1345 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1346 
   1347 		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
   1348 			dev->pdev->d3_delay = 20;
   1349 
   1350 		radeon_resume_kms(dev, true, true);
   1351 
   1352 		dev->pdev->d3_delay = d3_delay;
   1353 
   1354 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
   1355 		drm_kms_helper_poll_enable(dev);
   1356 	} else {
   1357 		printk(KERN_INFO "radeon: switched off\n");
   1358 		drm_kms_helper_poll_disable(dev);
   1359 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1360 		radeon_suspend_kms(dev, true, true);
   1361 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
   1362 	}
   1363 }
   1364 
   1365 /**
   1366  * radeon_switcheroo_can_switch - see if switcheroo state can change
   1367  *
   1368  * @pdev: pci dev pointer
   1369  *
   1370  * Callback for the switcheroo driver.  Checks if the switcheroo
   1371  * state can be changed.
   1372  * Returns true if the state can be changed, false if not.
   1373  */
   1374 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
   1375 {
   1376 	struct drm_device *dev = pci_get_drvdata(pdev);
   1377 
   1378 	/*
   1379 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
   1380 	 * locking inversion with the driver load path. And the access here is
   1381 	 * completely racy anyway. So don't bother with locking for now.
   1382 	 */
   1383 	return dev->open_count == 0;
   1384 }
   1385 
   1386 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
   1387 	.set_gpu_state = radeon_switcheroo_set_state,
   1388 	.reprobe = NULL,
   1389 	.can_switch = radeon_switcheroo_can_switch,
   1390 };
   1391 #endif
   1392 
   1393 /**
   1394  * radeon_device_init - initialize the driver
   1395  *
   1396  * @rdev: radeon_device pointer
   1397  * @ddev: drm dev pointer
   1398  * @pdev: pci dev pointer
   1399  * @flags: driver flags
   1400  *
   1401  * Initializes the driver info and hw (all asics).
   1402  * Returns 0 for success or an error on failure.
   1403  * Called at driver startup.
   1404  */
   1405 int radeon_device_init(struct radeon_device *rdev,
   1406 		       struct drm_device *ddev,
   1407 		       struct pci_dev *pdev,
   1408 		       uint32_t flags)
   1409 {
   1410 	int r, i;
   1411 	int dma_bits;
   1412 #ifndef __NetBSD__
   1413 	bool runtime = false;
   1414 #endif
   1415 
   1416 	rdev->shutdown = false;
   1417 	rdev->dev = ddev->dev;
   1418 	rdev->ddev = ddev;
   1419 	rdev->pdev = pdev;
   1420 	rdev->flags = flags;
   1421 	rdev->family = flags & RADEON_FAMILY_MASK;
   1422 	rdev->is_atom_bios = false;
   1423 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
   1424 	rdev->mc.gtt_size = 512 * 1024 * 1024;
   1425 	rdev->accel_working = false;
   1426 	/* set up ring ids */
   1427 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1428 		rdev->ring[i].idx = i;
   1429 	}
   1430 	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
   1431 
   1432 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
   1433 		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
   1434 		pdev->subsystem_vendor, pdev->subsystem_device);
   1435 
   1436 	/* mutex initializations are all done here so we
   1437 	 * can recall functions without having locking issues */
   1438 	mutex_init(&rdev->ring_lock);
   1439 	mutex_init(&rdev->dc_hw_i2c_mutex);
   1440 	atomic_set(&rdev->ih.lock, 0);
   1441 	mutex_init(&rdev->gem.mutex);
   1442 	mutex_init(&rdev->pm.mutex);
   1443 	mutex_init(&rdev->gpu_clock_mutex);
   1444 	mutex_init(&rdev->srbm_mutex);
   1445 	mutex_init(&rdev->grbm_idx_mutex);
   1446 	init_rwsem(&rdev->pm.mclk_lock);
   1447 	init_rwsem(&rdev->exclusive_lock);
   1448 #ifdef __NetBSD__
   1449 	spin_lock_init(&rdev->irq.vblank_lock);
   1450 	DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
   1451 #else
   1452 	init_waitqueue_head(&rdev->irq.vblank_queue);
   1453 #endif
   1454 	mutex_init(&rdev->mn_lock);
   1455 	hash_init(rdev->mn_hash);
   1456 	r = radeon_gem_init(rdev);
   1457 	if (r)
   1458 		return r;
   1459 
   1460 	radeon_check_arguments(rdev);
   1461 	/* Adjust VM size here.
   1462 	 * Max GPUVM size for cayman+ is 40 bits.
   1463 	 */
   1464 	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
   1465 
   1466 	/* Set asic functions */
   1467 	r = radeon_asic_init(rdev);
   1468 	if (r)
   1469 		return r;
   1470 
   1471 	/* all of the newer IGP chips have an internal gart
   1472 	 * However some rs4xx report as AGP, so remove that here.
   1473 	 */
   1474 	if ((rdev->family >= CHIP_RS400) &&
   1475 	    (rdev->flags & RADEON_IS_IGP)) {
   1476 		rdev->flags &= ~RADEON_IS_AGP;
   1477 	}
   1478 
   1479 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
   1480 		radeon_agp_disable(rdev);
   1481 	}
   1482 
   1483 	/* Set the internal MC address mask
   1484 	 * This is the max address of the GPU's
   1485 	 * internal address space.
   1486 	 */
   1487 	if (rdev->family >= CHIP_CAYMAN)
   1488 		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
   1489 	else if (rdev->family >= CHIP_CEDAR)
   1490 		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
   1491 	else
   1492 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
   1493 
   1494 	/* set DMA mask + need_dma32 flags.
   1495 	 * PCIE - can handle 40-bits.
   1496 	 * IGP - can handle 40-bits
   1497 	 * AGP - generally dma32 is safest
   1498 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
   1499 	 */
   1500 	rdev->need_dma32 = false;
   1501 	if (rdev->flags & RADEON_IS_AGP)
   1502 		rdev->need_dma32 = true;
   1503 	if ((rdev->flags & RADEON_IS_PCI) &&
   1504 	    (rdev->family <= CHIP_RS740))
   1505 		rdev->need_dma32 = true;
   1506 
   1507 	dma_bits = rdev->need_dma32 ? 32 : 40;
   1508 #ifdef __NetBSD__
   1509 	r = drm_limit_dma_space(rdev->ddev, 0, __BITS(dma_bits - 1, 0));
   1510 	if (r)
   1511 		DRM_ERROR("No suitable DMA available.\n");
   1512 #else
   1513 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
   1514 	if (r) {
   1515 		rdev->need_dma32 = true;
   1516 		dma_bits = 32;
   1517 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
   1518 	}
   1519 	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
   1520 	if (r) {
   1521 		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
   1522 		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
   1523 	}
   1524 #endif
   1525 
   1526 	/* Registers mapping */
   1527 	/* TODO: block userspace mapping of io register */
   1528 	/* XXX Destroy these locks on detach...  */
   1529 	spin_lock_init(&rdev->mmio_idx_lock);
   1530 	spin_lock_init(&rdev->smc_idx_lock);
   1531 	spin_lock_init(&rdev->pll_idx_lock);
   1532 	spin_lock_init(&rdev->mc_idx_lock);
   1533 	spin_lock_init(&rdev->pcie_idx_lock);
   1534 	spin_lock_init(&rdev->pciep_idx_lock);
   1535 	spin_lock_init(&rdev->pif_idx_lock);
   1536 	spin_lock_init(&rdev->cg_idx_lock);
   1537 	spin_lock_init(&rdev->uvd_idx_lock);
   1538 	spin_lock_init(&rdev->rcu_idx_lock);
   1539 	spin_lock_init(&rdev->didt_idx_lock);
   1540 	spin_lock_init(&rdev->end_idx_lock);
   1541 #ifdef __NetBSD__
   1542     {
   1543 	pcireg_t bar;
   1544 
   1545 	if (rdev->family >= CHIP_BONAIRE)
   1546 		bar = 5;
   1547 	else
   1548 		bar = 2;
   1549 	if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(bar),
   1550 		pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
   1551 		    rdev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
   1552 		0,
   1553 		&rdev->rmmio_bst, &rdev->rmmio_bsh,
   1554 		&rdev->rmmio_addr, &rdev->rmmio_size))
   1555 		return -EIO;
   1556     }
   1557 	DRM_INFO("register mmio base: 0x%"PRIxMAX"\n",
   1558 	    (uintmax_t)rdev->rmmio_addr);
   1559 	DRM_INFO("register mmio size: %"PRIuMAX"\n",
   1560 	    (uintmax_t)rdev->rmmio_size);
   1561 #else
   1562 	if (rdev->family >= CHIP_BONAIRE) {
   1563 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
   1564 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
   1565 	} else {
   1566 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
   1567 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
   1568 	}
   1569 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
   1570 	if (rdev->rmmio == NULL) {
   1571 		return -ENOMEM;
   1572 	}
   1573 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
   1574 	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
   1575 #endif
   1576 
   1577 	/* doorbell bar mapping */
   1578 	if (rdev->family >= CHIP_BONAIRE)
   1579 		radeon_doorbell_init(rdev);
   1580 
   1581 	/* io port mapping */
   1582 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
   1583 #ifdef __NetBSD__
   1584 		if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(i),
   1585 			PCI_MAPREG_TYPE_IO, 0,
   1586 			&rdev->rio_mem_bst, &rdev->rio_mem_bsh,
   1587 			NULL, &rdev->rio_mem_size))
   1588 			continue;
   1589 		break;
   1590 #else
   1591 		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
   1592 			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
   1593 			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
   1594 			break;
   1595 		}
   1596 #endif
   1597 	}
   1598 #ifdef __NetBSD__
   1599 	if (i == DEVICE_COUNT_RESOURCE)
   1600 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1601 #else
   1602 	if (rdev->rio_mem == NULL)
   1603 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1604 #endif
   1605 
   1606 	if (rdev->flags & RADEON_IS_PX)
   1607 		radeon_device_handle_px_quirks(rdev);
   1608 
   1609 #ifndef __NetBSD__		/* XXX radeon vga */
   1610 	/* if we have more than one VGA card, disable the radeon VGA resources */
   1611 	/* this will fail for cards that aren't VGA class devices; just
   1612 	 * ignore it */
   1613 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
   1614 
   1615 	if (rdev->flags & RADEON_IS_PX)
   1616 		runtime = true;
   1617 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
   1618 	if (runtime)
   1619 		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
   1620 #endif
   1621 
   1622 	r = radeon_init(rdev);
   1623 	if (r)
   1624 		goto failed;
   1625 
   1626 	r = radeon_gem_debugfs_init(rdev);
   1627 	if (r) {
   1628 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
   1629 	}
   1630 
   1631 	r = radeon_mst_debugfs_init(rdev);
   1632 	if (r) {
   1633 		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
   1634 	}
   1635 
   1636 	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
   1637 		/* Acceleration is not working on this AGP card; try again
   1638 		 * with a fallback to PCI or PCIE GART.
   1639 		 */
   1640 		radeon_asic_reset(rdev);
   1641 		radeon_fini(rdev);
   1642 		radeon_agp_disable(rdev);
   1643 		r = radeon_init(rdev);
   1644 		if (r)
   1645 			goto failed;
   1646 	}
   1647 
   1648 	r = radeon_ib_ring_tests(rdev);
   1649 	if (r)
   1650 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1651 
   1652 	/*
   1653 	 * Turks/Thames GPUs will freeze the whole laptop if DPM is not
   1654 	 * restarted after the CP ring has chewed through at least one packet.
   1655 	 * Hence we stop and restart DPM here, after radeon_ib_ring_tests().
   1656 	 */
   1657 	if (rdev->pm.dpm_enabled &&
   1658 	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
   1659 	    (rdev->family == CHIP_TURKS) &&
   1660 	    (rdev->flags & RADEON_IS_MOBILITY)) {
   1661 		mutex_lock(&rdev->pm.mutex);
   1662 		radeon_dpm_disable(rdev);
   1663 		radeon_dpm_enable(rdev);
   1664 		mutex_unlock(&rdev->pm.mutex);
   1665 	}
   1666 
   1667 	if ((radeon_testing & 1)) {
   1668 		if (rdev->accel_working)
   1669 			radeon_test_moves(rdev);
   1670 		else
   1671 			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
   1672 	}
   1673 	if ((radeon_testing & 2)) {
   1674 		if (rdev->accel_working)
   1675 			radeon_test_syncing(rdev);
   1676 		else
   1677 			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
   1678 	}
   1679 	if (radeon_benchmarking) {
   1680 		if (rdev->accel_working)
   1681 			radeon_benchmark(rdev, radeon_benchmarking);
   1682 		else
   1683 			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
   1684 	}
   1685 	return 0;
   1686 
   1687 failed:
   1688 #ifndef __NetBSD__		/* XXX radeon vga */
   1689 	if (runtime)
   1690 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
   1691 #endif
   1692 	return r;
   1693 }
   1694 
   1695 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
   1696 
   1697 /**
   1698  * radeon_device_fini - tear down the driver
   1699  *
   1700  * @rdev: radeon_device pointer
   1701  *
   1702  * Tear down the driver info (all asics).
   1703  * Called at driver shutdown.
   1704  */
   1705 void radeon_device_fini(struct radeon_device *rdev)
   1706 {
   1707 	DRM_INFO("radeon: finishing device.\n");
   1708 	rdev->shutdown = true;
   1709 	/* evict vram memory */
   1710 	radeon_bo_evict_vram(rdev);
   1711 	radeon_fini(rdev);
   1712 #ifndef __NetBSD__
   1713 	vga_switcheroo_unregister_client(rdev->pdev);
   1714 	if (rdev->flags & RADEON_IS_PX)
   1715 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
   1716 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
   1717 #endif
   1718 #ifdef __NetBSD__
   1719 	if (rdev->rio_mem_size)
   1720 		bus_space_unmap(rdev->rio_mem_bst, rdev->rio_mem_bsh,
   1721 		    rdev->rio_mem_size);
   1722 	rdev->rio_mem_size = 0;
   1723 	bus_space_unmap(rdev->rmmio_bst, rdev->rmmio_bsh, rdev->rmmio_size);
   1724 #else
   1725 	if (rdev->rio_mem)
   1726 		pci_iounmap(rdev->pdev, rdev->rio_mem);
   1727 	rdev->rio_mem = NULL;
   1728 	iounmap(rdev->rmmio);
   1729 	rdev->rmmio = NULL;
   1730 #endif
   1731 	if (rdev->family >= CHIP_BONAIRE)
   1732 		radeon_doorbell_fini(rdev);
   1733 	radeon_debugfs_remove_files(rdev);
   1734 
   1735 #ifdef __NetBSD__
   1736 	DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
   1737 	spin_lock_destroy(&rdev->irq.vblank_lock);
   1738 	destroy_rwsem(&rdev->exclusive_lock);
   1739 	destroy_rwsem(&rdev->pm.mclk_lock);
   1740 #endif
   1741 	mutex_destroy(&rdev->srbm_mutex);
   1742 	mutex_destroy(&rdev->gpu_clock_mutex);
   1743 	mutex_destroy(&rdev->pm.mutex);
   1744 	mutex_destroy(&rdev->gem.mutex);
   1745 	mutex_destroy(&rdev->dc_hw_i2c_mutex);
   1746 	mutex_destroy(&rdev->ring_lock);
   1747 }
   1748 
   1749 
   1750 /*
   1751  * Suspend & resume.
   1752  */
   1753 /**
   1754  * radeon_suspend_kms - initiate device suspend
   1755  *
   1756  * @dev: drm dev pointer
   1757  * @suspend: true to put the PCI device into a low-power state as well
         * @fbcon: true to also suspend the fbdev console
   1758  *
   1759  * Puts the hw in the suspend state (all asics).
   1760  * Returns 0 for success or an error on failure.
   1761  * Called at driver suspend.
   1762  */
   1763 int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
   1764 {
   1765 	struct radeon_device *rdev;
   1766 	struct drm_crtc *crtc;
   1767 	struct drm_connector *connector;
   1768 	int i, r;
   1769 
   1770 	if (dev == NULL || dev->dev_private == NULL) {
   1771 		return -ENODEV;
   1772 	}
   1773 
   1774 	rdev = dev->dev_private;
   1775 
   1776 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1777 		return 0;
   1778 
   1779 	drm_kms_helper_poll_disable(dev);
   1780 
   1781 	drm_modeset_lock_all(dev);
   1782 	/* turn off display hw */
   1783 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1784 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
   1785 	}
   1786 	drm_modeset_unlock_all(dev);
   1787 
   1788 	/* unpin the front buffers and cursors */
   1789 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1790 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1791 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
   1792 		struct radeon_bo *robj;
   1793 
   1794 		if (radeon_crtc->cursor_bo) {
   1795 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1796 			r = radeon_bo_reserve(robj, false);
   1797 			if (r == 0) {
   1798 				radeon_bo_unpin(robj);
   1799 				radeon_bo_unreserve(robj);
   1800 			}
   1801 		}
   1802 
   1803 		if (rfb == NULL || rfb->obj == NULL) {
   1804 			continue;
   1805 		}
   1806 		robj = gem_to_radeon_bo(rfb->obj);
   1807 		/* don't unpin kernel fb objects */
   1808 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
   1809 			r = radeon_bo_reserve(robj, false);
   1810 			if (r == 0) {
   1811 				radeon_bo_unpin(robj);
   1812 				radeon_bo_unreserve(robj);
   1813 			}
   1814 		}
   1815 	}
   1816 	/* evict vram memory */
   1817 	radeon_bo_evict_vram(rdev);
   1818 
   1819 	/* wait for gpu to finish processing current batch */
   1820 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1821 		r = radeon_fence_wait_empty(rdev, i);
   1822 		if (r) {
   1823 			/* delay GPU reset to resume */
   1824 			radeon_fence_driver_force_completion(rdev, i);
   1825 		}
   1826 	}
   1827 
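        	/*
        	 * Save the BIOS scratch registers (the vbios uses them to track
        	 * display/output state) so they can be restored on resume.
        	 */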
   1828 	radeon_save_bios_scratch_regs(rdev);
   1829 
   1830 	radeon_suspend(rdev);
   1831 	radeon_hpd_fini(rdev);
   1832 	/* evict remaining vram memory */
   1833 	radeon_bo_evict_vram(rdev);
   1834 
   1835 	radeon_agp_suspend(rdev);
   1836 
   1837 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1838 	pci_save_state(dev->pdev);
   1839 	if (suspend) {
   1840 		/* Shut down the device */
   1841 		pci_disable_device(dev->pdev);
   1842 		pci_set_power_state(dev->pdev, PCI_D3hot);
   1843 	}
   1844 #endif
   1845 
   1846 	if (fbcon) {
   1847 		console_lock();
   1848 		radeon_fbdev_set_suspend(rdev, 1);
   1849 		console_unlock();
   1850 	}
   1851 	return 0;
   1852 }
   1853 
   1854 /**
   1855  * radeon_resume_kms - initiate device resume
   1856  *
   1857  * @dev: drm dev pointer
         * @resume: true to re-enable the PCI device and restore its state
         * @fbcon: true to also resume the fbdev console
   1858  *
   1859  * Bring the hw back to operating state (all asics).
   1860  * Returns 0 for success or an error on failure.
   1861  * Called at driver resume.
   1862  */
   1863 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
   1864 {
   1865 	struct drm_connector *connector;
   1866 	struct radeon_device *rdev = dev->dev_private;
   1867 	struct drm_crtc *crtc;
   1868 	int r;
   1869 
   1870 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1871 		return 0;
   1872 
   1873 	if (fbcon) {
   1874 		console_lock();
   1875 	}
   1876 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1877 	if (resume) {
   1878 		pci_set_power_state(dev->pdev, PCI_D0);
   1879 		pci_restore_state(dev->pdev);
   1880 		if (pci_enable_device(dev->pdev)) {
   1881 			if (fbcon)
   1882 				console_unlock();
   1883 			return -1;
   1884 		}
   1885 	}
   1886 #endif
   1887 	/* resume AGP if in use */
   1888 	radeon_agp_resume(rdev);
   1889 	radeon_resume(rdev);
   1890 
   1891 	r = radeon_ib_ring_tests(rdev);
   1892 	if (r)
   1893 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1894 
   1895 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   1896 		/* do dpm late init */
   1897 		r = radeon_pm_late_init(rdev);
   1898 		if (r) {
   1899 			rdev->pm.dpm_enabled = false;
   1900 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1901 		}
   1902 	} else {
   1903 		/* resume old pm late */
   1904 		radeon_pm_resume(rdev);
   1905 	}
   1906 
   1907 	radeon_restore_bios_scratch_regs(rdev);
   1908 
   1909 	/* pin cursors */
   1910 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1911 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1912 
   1913 		if (radeon_crtc->cursor_bo) {
   1914 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1915 			r = radeon_bo_reserve(robj, false);
   1916 			if (r == 0) {
   1917 				/* Only 27 bit offset for legacy cursor */
   1918 				r = radeon_bo_pin_restricted(robj,
   1919 							     RADEON_GEM_DOMAIN_VRAM,
   1920 							     ASIC_IS_AVIVO(rdev) ?
   1921 							     0 : 1 << 27,
   1922 							     &radeon_crtc->cursor_addr);
   1923 				if (r != 0)
   1924 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
   1925 				radeon_bo_unreserve(robj);
   1926 			}
   1927 		}
   1928 	}
   1929 
   1930 	/* init dig PHYs, disp eng pll */
   1931 	if (rdev->is_atom_bios) {
   1932 		radeon_atom_encoder_init(rdev);
   1933 		radeon_atom_disp_eng_pll_init(rdev);
   1934 		/* turn on the BL */
   1935 		if (rdev->mode_info.bl_encoder) {
   1936 			u8 bl_level = radeon_get_backlight_level(rdev,
   1937 								 rdev->mode_info.bl_encoder);
   1938 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   1939 						   bl_level);
   1940 		}
   1941 	}
   1942 	/* reset hpd state */
   1943 	radeon_hpd_init(rdev);
   1944 	/* blat the mode back in */
   1945 	if (fbcon) {
   1946 		drm_helper_resume_force_mode(dev);
   1947 		/* turn on display hw */
   1948 		drm_modeset_lock_all(dev);
   1949 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1950 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
   1951 		}
   1952 		drm_modeset_unlock_all(dev);
   1953 	}
   1954 
   1955 	drm_kms_helper_poll_enable(dev);
   1956 
   1957 	/* set the power state here in case we are a PX system or headless */
   1958 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   1959 		radeon_pm_compute_clocks(rdev);
   1960 
   1961 	if (fbcon) {
   1962 		radeon_fbdev_set_suspend(rdev, 0);
   1963 		console_unlock();
   1964 	}
   1965 
   1966 	return 0;
   1967 }
   1968 
   1969 /**
   1970  * radeon_gpu_reset - reset the asic
   1971  *
   1972  * @rdev: radeon device pointer
   1973  *
   1974  * Attempt to reset the GPU if it has hung (all asics).
   1975  * Returns 0 for success or an error on failure.
   1976  */
   1977 int radeon_gpu_reset(struct radeon_device *rdev)
   1978 {
   1979 	unsigned ring_sizes[RADEON_NUM_RINGS];
   1980 	uint32_t *ring_data[RADEON_NUM_RINGS];
   1981 
   1982 	bool saved = false;
   1983 
   1984 	int i, r;
   1985 	int resched;
   1986 
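        	/*
        	 * Take the exclusive lock for writing so nothing else can touch
        	 * the GPU while the reset is in progress.
        	 */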
   1987 	down_write(&rdev->exclusive_lock);
   1988 
   1989 	if (!rdev->needs_reset) {
   1990 		up_write(&rdev->exclusive_lock);
   1991 		return 0;
   1992 	}
   1993 
   1994 	atomic_inc(&rdev->gpu_reset_counter);
   1995 
   1996 	radeon_save_bios_scratch_regs(rdev);
   1997 	/* block TTM */
   1998 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
   1999 	radeon_suspend(rdev);
   2000 	radeon_hpd_fini(rdev);
   2001 
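        	/*
        	 * Save any unprocessed command dwords from each ring so they
        	 * can be resubmitted once the ASIC is back up.
        	 */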
   2002 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   2003 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
   2004 						   &ring_data[i]);
   2005 		if (ring_sizes[i]) {
   2006 			saved = true;
   2007 			dev_info(rdev->dev, "Saved %d dwords of commands "
   2008 				 "on ring %d.\n", ring_sizes[i], i);
   2009 		}
   2010 	}
   2011 
   2012 	r = radeon_asic_reset(rdev);
   2013 	if (!r) {
   2014 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
   2015 		radeon_resume(rdev);
   2016 	}
   2017 
   2018 	radeon_restore_bios_scratch_regs(rdev);
   2019 
   2020 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   2021 		if (!r && ring_data[i]) {
   2022 			radeon_ring_restore(rdev, &rdev->ring[i],
   2023 					    ring_sizes[i], ring_data[i]);
   2024 		} else {
   2025 			radeon_fence_driver_force_completion(rdev, i);
   2026 			kfree(ring_data[i]);
   2027 		}
   2028 	}
   2029 
   2030 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   2031 		/* do dpm late init */
   2032 		r = radeon_pm_late_init(rdev);
   2033 		if (r) {
   2034 			rdev->pm.dpm_enabled = false;
   2035 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   2036 		}
   2037 	} else {
   2038 		/* resume old pm late */
   2039 		radeon_pm_resume(rdev);
   2040 	}
   2041 
   2042 	/* init dig PHYs, disp eng pll */
   2043 	if (rdev->is_atom_bios) {
   2044 		radeon_atom_encoder_init(rdev);
   2045 		radeon_atom_disp_eng_pll_init(rdev);
   2046 		/* turn on the BL */
   2047 		if (rdev->mode_info.bl_encoder) {
   2048 			u8 bl_level = radeon_get_backlight_level(rdev,
   2049 								 rdev->mode_info.bl_encoder);
   2050 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   2051 						   bl_level);
   2052 		}
   2053 	}
   2054 	/* reset hpd state */
   2055 	radeon_hpd_init(rdev);
   2056 
   2057 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
   2058 
   2059 	rdev->in_reset = true;
   2060 	rdev->needs_reset = false;
   2061 
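        	/*
        	 * Downgrade to a read lock: holding it read keeps another reset
        	 * from starting, while readers blocked on exclusive_lock (fence
        	 * waiters and the like) can now make progress.
        	 */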
   2062 	downgrade_write(&rdev->exclusive_lock);
   2063 
   2064 	drm_helper_resume_force_mode(rdev->ddev);
   2065 
   2066 	/* set the power state here in case we are a PX system or headless */
   2067 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   2068 		radeon_pm_compute_clocks(rdev);
   2069 
   2070 	if (!r) {
   2071 		r = radeon_ib_ring_tests(rdev);
   2072 		if (r && saved)
   2073 			r = -EAGAIN;
   2074 	} else {
   2075 		/* bad news: how do we tell userspace? */
   2076 		dev_info(rdev->dev, "GPU reset failed\n");
   2077 	}
   2078 
   2079 	rdev->needs_reset = r == -EAGAIN;
   2080 	rdev->in_reset = false;
   2081 
   2082 	up_read(&rdev->exclusive_lock);
   2083 	return r;
   2084 }
   2085 
   2086 
   2087 /*
   2088  * Debugfs
   2089  */
   2090 int radeon_debugfs_add_files(struct radeon_device *rdev,
   2091 			     struct drm_info_list *files,
   2092 			     unsigned nfiles)
   2093 {
   2094 	unsigned i;
   2095 
   2096 	for (i = 0; i < rdev->debugfs_count; i++) {
   2097 		if (rdev->debugfs[i].files == files) {
   2098 			/* Already registered */
   2099 			return 0;
   2100 		}
   2101 	}
   2102 
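        	/* Claim the next slot in the fixed-size debugfs table, if one is left. */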
   2103 	i = rdev->debugfs_count + 1;
   2104 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
   2105 		DRM_ERROR("Reached maximum number of debugfs components.\n");
   2106 		DRM_ERROR("Please report this so we can increase "
   2107 		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
   2108 		return -EINVAL;
   2109 	}
   2110 	rdev->debugfs[rdev->debugfs_count].files = files;
   2111 	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
   2112 	rdev->debugfs_count = i;
   2113 #if defined(CONFIG_DEBUG_FS)
   2114 	drm_debugfs_create_files(files, nfiles,
   2115 				 rdev->ddev->control->debugfs_root,
   2116 				 rdev->ddev->control);
   2117 	drm_debugfs_create_files(files, nfiles,
   2118 				 rdev->ddev->primary->debugfs_root,
   2119 				 rdev->ddev->primary);
   2120 #endif
   2121 	return 0;
   2122 }
   2123 
   2124 static void radeon_debugfs_remove_files(struct radeon_device *rdev)
   2125 {
   2126 #if defined(CONFIG_DEBUG_FS)
   2127 	unsigned i;
   2128 
   2129 	for (i = 0; i < rdev->debugfs_count; i++) {
   2130 		drm_debugfs_remove_files(rdev->debugfs[i].files,
   2131 					 rdev->debugfs[i].num_files,
   2132 					 rdev->ddev->control);
   2133 		drm_debugfs_remove_files(rdev->debugfs[i].files,
   2134 					 rdev->debugfs[i].num_files,
   2135 					 rdev->ddev->primary);
   2136 	}
   2137 #endif
   2138 }
   2139 
   2140 #if defined(CONFIG_DEBUG_FS)
   2141 int radeon_debugfs_init(struct drm_minor *minor)
   2142 {
   2143 	return 0;
   2144 }
   2145 
   2146 void radeon_debugfs_cleanup(struct drm_minor *minor)
   2147 {
   2148 }
   2149 #endif
   2150