      1 /*	$NetBSD: radeon_device.c,v 1.3.32.1 2019/06/10 22:08:26 christos Exp $	*/
      2 
      3 /*
      4  * Copyright 2008 Advanced Micro Devices, Inc.
      5  * Copyright 2008 Red Hat Inc.
      6  * Copyright 2009 Jerome Glisse.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24  * OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  * Authors: Dave Airlie
     27  *          Alex Deucher
     28  *          Jerome Glisse
     29  */
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.3.32.1 2019/06/10 22:08:26 christos Exp $");
     32 
     33 #include <linux/console.h>
     34 #include <linux/slab.h>
     35 #include <drm/drmP.h>
     36 #include <drm/drm_crtc_helper.h>
     37 #include <drm/radeon_drm.h>
     38 #include <linux/vgaarb.h>
     39 #include <linux/vga_switcheroo.h>
     40 #include <linux/efi.h>
     41 #include <linux/bitops.h>
     42 #include "radeon_reg.h"
     43 #include "radeon.h"
     44 #include "atom.h"
     45 
     46 static const char radeon_family_name[][16] = {
     47 	"R100",
     48 	"RV100",
     49 	"RS100",
     50 	"RV200",
     51 	"RS200",
     52 	"R200",
     53 	"RV250",
     54 	"RS300",
     55 	"RV280",
     56 	"R300",
     57 	"R350",
     58 	"RV350",
     59 	"RV380",
     60 	"R420",
     61 	"R423",
     62 	"RV410",
     63 	"RS400",
     64 	"RS480",
     65 	"RS600",
     66 	"RS690",
     67 	"RS740",
     68 	"RV515",
     69 	"R520",
     70 	"RV530",
     71 	"RV560",
     72 	"RV570",
     73 	"R580",
     74 	"R600",
     75 	"RV610",
     76 	"RV630",
     77 	"RV670",
     78 	"RV620",
     79 	"RV635",
     80 	"RS780",
     81 	"RS880",
     82 	"RV770",
     83 	"RV730",
     84 	"RV710",
     85 	"RV740",
     86 	"CEDAR",
     87 	"REDWOOD",
     88 	"JUNIPER",
     89 	"CYPRESS",
     90 	"HEMLOCK",
     91 	"PALM",
     92 	"SUMO",
     93 	"SUMO2",
     94 	"BARTS",
     95 	"TURKS",
     96 	"CAICOS",
     97 	"CAYMAN",
     98 	"ARUBA",
     99 	"TAHITI",
    100 	"PITCAIRN",
    101 	"VERDE",
    102 	"OLAND",
    103 	"HAINAN",
    104 	"BONAIRE",
    105 	"KAVERI",
    106 	"KABINI",
    107 	"HAWAII",
    108 	"MULLINS",
    109 	"LAST",
    110 };
    111 
    112 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
    113 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
    114 
    115 struct radeon_px_quirk {
    116 	u32 chip_vendor;
    117 	u32 chip_device;
    118 	u32 subsys_vendor;
    119 	u32 subsys_device;
    120 	u32 px_quirk_flags;
    121 };
    122 
    123 static struct radeon_px_quirk radeon_px_quirk_list[] = {
    124 	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
    125 	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
    126 	 */
    127 	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
    128 	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
    129 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
    130 	 */
    131 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
    132 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    133 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
    134 	 */
    135 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    136 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
    137 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
    138 	 */
    139 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    140 	/* macbook pro 8.2 */
    141 	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
    142 	{ 0, 0, 0, 0, 0 },
    143 };
    144 
    145 bool radeon_is_px(struct drm_device *dev)
    146 {
    147 	struct radeon_device *rdev = dev->dev_private;
    148 
    149 	if (rdev->flags & RADEON_IS_PX)
    150 		return true;
    151 	return false;
    152 }
    153 
    154 static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
    155 {
    156 	struct radeon_px_quirk *p = radeon_px_quirk_list;
    157 
    158 	/* Apply PX quirks */
    159 	while (p && p->chip_device != 0) {
    160 		if (rdev->pdev->vendor == p->chip_vendor &&
    161 		    rdev->pdev->device == p->chip_device &&
    162 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
    163 		    rdev->pdev->subsystem_device == p->subsys_device) {
    164 			rdev->px_quirk_flags = p->px_quirk_flags;
    165 			break;
    166 		}
    167 		++p;
    168 	}
    169 
    170 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
    171 		rdev->flags &= ~RADEON_IS_PX;
    172 }
    173 
    174 /**
    175  * radeon_program_register_sequence - program an array of registers.
    176  *
    177  * @rdev: radeon_device pointer
    178  * @registers: pointer to the register array
    179  * @array_size: size of the register array
    180  *
    181  * Programs an array or registers with and and or masks.
    182  * This is a helper for setting golden registers.
    183  */
    184 void radeon_program_register_sequence(struct radeon_device *rdev,
    185 				      const u32 *registers,
    186 				      const u32 array_size)
    187 {
    188 	u32 tmp, reg, and_mask, or_mask;
    189 	int i;
    190 
    191 	if (array_size % 3)
    192 		return;
    193 
     194 	for (i = 0; i < array_size; i += 3) {
    195 		reg = registers[i + 0];
    196 		and_mask = registers[i + 1];
    197 		or_mask = registers[i + 2];
    198 
    199 		if (and_mask == 0xffffffff) {
    200 			tmp = or_mask;
    201 		} else {
    202 			tmp = RREG32(reg);
    203 			tmp &= ~and_mask;
    204 			tmp |= or_mask;
    205 		}
    206 		WREG32(reg, tmp);
    207 	}
    208 }
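/*
 * Usage sketch (illustrative, not taken from this file): callers hand in a
 * flat array of { register offset, AND mask, OR mask } triples, so array_size
 * must be a multiple of three.  An and_mask of 0xffffffff means "write the
 * or_mask as-is"; anything else is a read-modify-write that clears the
 * and_mask bits and sets the or_mask bits.  Offsets and values below are made
 * up for the example:
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0xffffffff, 0x00000082,
 *		0x8a14, 0xf000000f, 0x00000007,
 *	};
 *
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *	    (u32)ARRAY_SIZE(example_golden_registers));
 */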
    209 
    210 void radeon_pci_config_reset(struct radeon_device *rdev)
    211 {
    212 	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
    213 }
    214 
    215 /**
    216  * radeon_surface_init - Clear GPU surface registers.
    217  *
    218  * @rdev: radeon_device pointer
    219  *
    220  * Clear GPU surface registers (r1xx-r5xx).
    221  */
    222 void radeon_surface_init(struct radeon_device *rdev)
    223 {
    224 	/* FIXME: check this out */
    225 	if (rdev->family < CHIP_R600) {
    226 		int i;
    227 
    228 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
    229 			if (rdev->surface_regs[i].bo)
    230 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
    231 			else
    232 				radeon_clear_surface_reg(rdev, i);
    233 		}
    234 		/* enable surfaces */
    235 		WREG32(RADEON_SURFACE_CNTL, 0);
    236 	}
    237 }
    238 
    239 /*
     240  * GPU scratch register helper functions.
    241  */
    242 /**
    243  * radeon_scratch_init - Init scratch register driver information.
    244  *
    245  * @rdev: radeon_device pointer
    246  *
    247  * Init CP scratch register driver information (r1xx-r5xx)
    248  */
    249 void radeon_scratch_init(struct radeon_device *rdev)
    250 {
    251 	int i;
    252 
    253 	/* FIXME: check this out */
    254 	if (rdev->family < CHIP_R300) {
    255 		rdev->scratch.num_reg = 5;
    256 	} else {
    257 		rdev->scratch.num_reg = 7;
    258 	}
    259 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    260 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    261 		rdev->scratch.free[i] = true;
    262 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    263 	}
    264 }
    265 
    266 /**
    267  * radeon_scratch_get - Allocate a scratch register
    268  *
    269  * @rdev: radeon_device pointer
    270  * @reg: scratch register mmio offset
    271  *
    272  * Allocate a CP scratch register for use by the driver (all asics).
    273  * Returns 0 on success or -EINVAL on failure.
    274  */
    275 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
    276 {
    277 	int i;
    278 
    279 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    280 		if (rdev->scratch.free[i]) {
    281 			rdev->scratch.free[i] = false;
    282 			*reg = rdev->scratch.reg[i];
    283 			return 0;
    284 		}
    285 	}
    286 	return -EINVAL;
    287 }
    288 
    289 /**
    290  * radeon_scratch_free - Free a scratch register
    291  *
    292  * @rdev: radeon_device pointer
    293  * @reg: scratch register mmio offset
    294  *
    295  * Free a CP scratch register allocated for use by the driver (all asics)
    296  */
    297 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
    298 {
    299 	int i;
    300 
    301 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    302 		if (rdev->scratch.reg[i] == reg) {
    303 			rdev->scratch.free[i] = true;
    304 			return;
    305 		}
    306 	}
    307 }
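/*
 * Usage sketch (illustrative): this is roughly how the per-asic ring tests
 * elsewhere in the driver exercise a scratch register; the packet emission in
 * the middle is asic-specific and omitted here.
 *
 *	uint32_t scratch;
 *	int r = radeon_scratch_get(rdev, &scratch);
 *	if (r == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... emit a CP packet that writes 0xDEADBEEF to scratch,
 *		    then poll RREG32(scratch) until it changes ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */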
    308 
    309 /*
     310  * GPU doorbell aperture helper functions.
    311  */
    312 /**
    313  * radeon_doorbell_init - Init doorbell driver information.
    314  *
    315  * @rdev: radeon_device pointer
    316  *
    317  * Init doorbell driver information (CIK)
    318  * Returns 0 on success, error on failure.
    319  */
    320 static int radeon_doorbell_init(struct radeon_device *rdev)
    321 {
    322 #ifdef __NetBSD__
    323 	int r;
    324 #endif
    325 
    326 	/* doorbell bar mapping */
    327 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
    328 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
    329 
    330 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
    331 	if (rdev->doorbell.num_doorbells == 0)
    332 		return -EINVAL;
    333 
    334 #ifdef __NetBSD__
    335 	/* XXX errno NetBSD->Linux */
    336 	rdev->doorbell.bst = rdev->pdev->pd_pa.pa_memt;
    337 	r = -bus_space_map(rdev->doorbell.bst, rdev->doorbell.base,
    338 	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)),
    339 	    0, &rdev->doorbell.bsh);
    340 	if (r)
    341 		return r;
    342 #else
    343 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
    344 	if (rdev->doorbell.ptr == NULL) {
    345 		return -ENOMEM;
    346 	}
    347 #endif
    348 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
    349 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
    350 
    351 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
    352 
    353 	return 0;
    354 }
    355 
    356 /**
    357  * radeon_doorbell_fini - Tear down doorbell driver information.
    358  *
    359  * @rdev: radeon_device pointer
    360  *
    361  * Tear down doorbell driver information (CIK)
    362  */
    363 static void radeon_doorbell_fini(struct radeon_device *rdev)
    364 {
    365 #ifdef __NetBSD__
    366 	bus_space_unmap(rdev->doorbell.bst, rdev->doorbell.bsh,
    367 	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)));
    368 #else
    369 	iounmap(rdev->doorbell.ptr);
    370 	rdev->doorbell.ptr = NULL;
    371 #endif
    372 }
    373 
    374 /**
    375  * radeon_doorbell_get - Allocate a doorbell entry
    376  *
    377  * @rdev: radeon_device pointer
    378  * @doorbell: doorbell index
    379  *
    380  * Allocate a doorbell for use by the driver (all asics).
    381  * Returns 0 on success or -EINVAL on failure.
    382  */
    383 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
    384 {
    385 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
    386 	if (offset < rdev->doorbell.num_doorbells) {
    387 		__set_bit(offset, rdev->doorbell.used);
    388 		*doorbell = offset;
    389 		return 0;
    390 	} else {
    391 		return -EINVAL;
    392 	}
    393 }
    394 
    395 /**
    396  * radeon_doorbell_free - Free a doorbell entry
    397  *
    398  * @rdev: radeon_device pointer
    399  * @doorbell: doorbell index
    400  *
    401  * Free a doorbell allocated for use by the driver (all asics)
    402  */
    403 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
    404 {
    405 	if (doorbell < rdev->doorbell.num_doorbells)
    406 		__clear_bit(doorbell, rdev->doorbell.used);
    407 }
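/*
 * Usage sketch (illustrative): compute ring setup on CIK-class parts grabs a
 * doorbell index with radeon_doorbell_get(), keeps it for the lifetime of the
 * ring, and hands it back with radeon_doorbell_free() on teardown, roughly:
 *
 *	u32 index;
 *	if (radeon_doorbell_get(rdev, &index) == 0) {
 *		ring->doorbell_index = index;
 *		...
 *		radeon_doorbell_free(rdev, index);
 *	}
 */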
    408 
    409 /**
    410  * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
     411  *                                set up KFD
    412  *
    413  * @rdev: radeon_device pointer
    414  * @aperture_base: output returning doorbell aperture base physical address
    415  * @aperture_size: output returning doorbell aperture size in bytes
    416  * @start_offset: output returning # of doorbell bytes reserved for radeon.
    417  *
    418  * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
    419  * takes doorbells required for its own rings and reports the setup to KFD.
    420  * Radeon reserved doorbells are at the start of the doorbell aperture.
    421  */
    422 void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
    423 				  phys_addr_t *aperture_base,
    424 				  size_t *aperture_size,
    425 				  size_t *start_offset)
    426 {
    427 	/* The first num_doorbells are used by radeon.
    428 	 * KFD takes whatever's left in the aperture. */
    429 	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
    430 		*aperture_base = rdev->doorbell.base;
    431 		*aperture_size = rdev->doorbell.size;
    432 		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
    433 	} else {
    434 		*aperture_base = 0;
    435 		*aperture_size = 0;
    436 		*start_offset = 0;
    437 	}
    438 }
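/*
 * Worked example for the split above (numbers are illustrative): if the
 * doorbell BAR is 2 MB and num_doorbells works out to 1024, radeon keeps the
 * first 1024 * sizeof(u32) = 4096 bytes for its own rings, so KFD is told
 * aperture_base = doorbell.base, aperture_size = 2 MB and start_offset = 4096;
 * everything from that offset onward is KFD's to manage.
 */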
    439 
    440 /*
    441  * radeon_wb_*()
     442  * Writeback is the method by which the GPU updates special pages
    443  * in memory with the status of certain GPU events (fences, ring pointers,
    444  * etc.).
    445  */
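/*
 * Consumer sketch (illustrative; the real readers live in the per-asic ring
 * and fence code): when rdev->wb.enabled is set, hot-path reads such as ring
 * read pointers come from the writeback page instead of an MMIO register,
 * roughly:
 *
 *	if (rdev->wb.enabled)
 *		rptr = rdev->wb.wb[ring->rptr_offs/4];
 *	else
 *		rptr = RREG32(reg);	where reg is the asic's CP_RB_RPTR-style register
 */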
    446 
    447 /**
    448  * radeon_wb_disable - Disable Writeback
    449  *
    450  * @rdev: radeon_device pointer
    451  *
    452  * Disables Writeback (all asics).  Used for suspend.
    453  */
    454 void radeon_wb_disable(struct radeon_device *rdev)
    455 {
    456 	rdev->wb.enabled = false;
    457 }
    458 
    459 /**
    460  * radeon_wb_fini - Disable Writeback and free memory
    461  *
    462  * @rdev: radeon_device pointer
    463  *
    464  * Disables Writeback and frees the Writeback memory (all asics).
    465  * Used at driver shutdown.
    466  */
    467 void radeon_wb_fini(struct radeon_device *rdev)
    468 {
    469 	radeon_wb_disable(rdev);
    470 	if (rdev->wb.wb_obj) {
    471 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
    472 			radeon_bo_kunmap(rdev->wb.wb_obj);
    473 			radeon_bo_unpin(rdev->wb.wb_obj);
    474 			radeon_bo_unreserve(rdev->wb.wb_obj);
    475 		}
    476 		radeon_bo_unref(&rdev->wb.wb_obj);
    477 		rdev->wb.wb = NULL;
    478 		rdev->wb.wb_obj = NULL;
    479 	}
    480 }
    481 
    482 /**
     483  * radeon_wb_init - Init Writeback driver info and allocate memory
     484  *
     485  * @rdev: radeon_device pointer
     486  *
     487  * Initializes Writeback and allocates the Writeback memory (all asics).
     488  * Used at driver startup.
     489  * Returns 0 on success or a negative error code on failure.
    490  */
    491 int radeon_wb_init(struct radeon_device *rdev)
    492 {
    493 	int r;
    494 
    495 	if (rdev->wb.wb_obj == NULL) {
    496 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
    497 				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
    498 				     &rdev->wb.wb_obj);
    499 		if (r) {
    500 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
    501 			return r;
    502 		}
    503 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
    504 		if (unlikely(r != 0)) {
    505 			radeon_wb_fini(rdev);
    506 			return r;
    507 		}
    508 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
    509 				&rdev->wb.gpu_addr);
    510 		if (r) {
    511 			radeon_bo_unreserve(rdev->wb.wb_obj);
    512 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
    513 			radeon_wb_fini(rdev);
    514 			return r;
    515 		}
    516 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)__UNVOLATILE(&rdev->wb.wb));
    517 		radeon_bo_unreserve(rdev->wb.wb_obj);
    518 		if (r) {
    519 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
    520 			radeon_wb_fini(rdev);
    521 			return r;
    522 		}
    523 	}
    524 
    525 	/* clear wb memory */
    526 	memset(__UNVOLATILE(rdev->wb.wb), 0, RADEON_GPU_PAGE_SIZE);
    527 	/* disable event_write fences */
    528 	rdev->wb.use_event = false;
    529 	/* disabled via module param */
    530 	if (radeon_no_wb == 1) {
    531 		rdev->wb.enabled = false;
    532 	} else {
    533 		if (rdev->flags & RADEON_IS_AGP) {
    534 			/* often unreliable on AGP */
    535 			rdev->wb.enabled = false;
    536 		} else if (rdev->family < CHIP_R300) {
    537 			/* often unreliable on pre-r300 */
    538 			rdev->wb.enabled = false;
    539 		} else {
    540 			rdev->wb.enabled = true;
    541 			/* event_write fences are only available on r600+ */
    542 			if (rdev->family >= CHIP_R600) {
    543 				rdev->wb.use_event = true;
    544 			}
    545 		}
    546 	}
    547 	/* always use writeback/events on NI, APUs */
    548 	if (rdev->family >= CHIP_PALM) {
    549 		rdev->wb.enabled = true;
    550 		rdev->wb.use_event = true;
    551 	}
    552 
    553 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
    554 
    555 	return 0;
    556 }
    557 
    558 /**
    559  * radeon_vram_location - try to find VRAM location
     560  * @rdev: radeon device structure holding all necessary information
     561  * @mc: memory controller structure holding memory information
     562  * @base: base address at which to put VRAM
     563  *
     564  * Function will try to place VRAM at the base address provided
     565  * as a parameter (which is so far either the PCI aperture address or,
     566  * for IGP, the TOM base address).
     567  *
     568  * If there is not enough space to fit the non-visible VRAM in the 32-bit
     569  * address space, then we limit the VRAM size to the aperture.
     570  *
     571  * If we are using AGP and if the AGP aperture doesn't allow us to have
     572  * room for all the VRAM, then we restrict the VRAM to the PCI aperture
     573  * size and print a warning.
     574  *
     575  * This function never fails; the worst case is limiting VRAM.
     576  *
     577  * Note: GTT start, end and size should be initialized before calling this
     578  * function on AGP platforms.
     579  *
     580  * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
     581  * this shouldn't be a problem as we are using the PCI aperture as a reference.
     582  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
     583  * not IGP.
     584  *
     585  * Note: we use mc_vram_size because on some boards we need to program the MC
     586  * to cover the whole aperture even if the VRAM size is smaller than the
     587  * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
     588  *
     589  * Note: when limiting VRAM it's safe to overwrite real_vram_size because
     590  * we are not in the case where real_vram_size is smaller than mc_vram_size
     591  * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
     592  * related Ubuntu ones).
     593  *
     594  * Note: IGP TOM addr should be the same as the aperture addr, we don't
     595  * explicitly check for that though.
    596  *
    597  * FIXME: when reducing VRAM size align new size on power of 2.
    598  */
    599 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
    600 {
    601 	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
    602 
    603 	mc->vram_start = base;
    604 	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
    605 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    606 		mc->real_vram_size = mc->aper_size;
    607 		mc->mc_vram_size = mc->aper_size;
    608 	}
    609 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    610 	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
    611 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    612 		mc->real_vram_size = mc->aper_size;
    613 		mc->mc_vram_size = mc->aper_size;
    614 	}
    615 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    616 	if (limit && limit < mc->real_vram_size)
    617 		mc->real_vram_size = limit;
    618 	dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64" (%"PRIu64"M used)\n",
    619 			mc->mc_vram_size >> 20, mc->vram_start,
    620 			mc->vram_end, mc->real_vram_size >> 20);
    621 }
    622 
    623 /**
    624  * radeon_gtt_location - try to find GTT location
     625  * @rdev: radeon device structure holding all necessary information
     626  * @mc: memory controller structure holding memory information
     627  *
     628  * Function will try to place GTT before or after VRAM.
     629  *
     630  * If the GTT size is bigger than the space left, then we adjust the GTT size.
     631  * Thus this function never fails.
    632  *
    633  * FIXME: when reducing GTT size align new size on power of 2.
    634  */
    635 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
    636 {
    637 	u64 size_af, size_bf;
    638 
    639 	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
    640 	size_bf = mc->vram_start & ~mc->gtt_base_align;
    641 	if (size_bf > size_af) {
    642 		if (mc->gtt_size > size_bf) {
    643 			dev_warn(rdev->dev, "limiting GTT\n");
    644 			mc->gtt_size = size_bf;
    645 		}
    646 		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
    647 	} else {
    648 		if (mc->gtt_size > size_af) {
    649 			dev_warn(rdev->dev, "limiting GTT\n");
    650 			mc->gtt_size = size_af;
    651 		}
    652 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
    653 	}
    654 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
    655 	dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64"\n",
    656 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
    657 }
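/*
 * Ordering sketch (illustrative): a per-asic mc_init is expected to fill in
 * mc->aper_base, mc->aper_size, mc->mc_vram_size and mc->real_vram_size, then
 * lay out the GPU address space with the two helpers above, VRAM first and
 * GTT second (GTT placement depends on the VRAM range), roughly:
 *
 *	rdev->mc.gtt_base_align = 0;
 *	radeon_vram_location(rdev, &rdev->mc, base);
 *	radeon_gtt_location(rdev, &rdev->mc);
 */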
    658 
    659 /*
     660  * GPU helper functions.
    661  */
    662 
    663 /**
     664  * radeon_device_is_virtual - check if we are running in a virtual environment
    665  *
    666  * Check if the asic has been passed through to a VM (all asics).
    667  * Used at driver startup.
    668  * Returns true if virtual or false if not.
    669  */
    670 static bool radeon_device_is_virtual(void)
    671 {
    672 #ifdef CONFIG_X86
    673 #ifdef __NetBSD__		/* XXX virtualization */
    674 	return false;
    675 #else
    676 	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
    677 #endif
    678 #else
    679 	return false;
    680 #endif
    681 }
    682 
    683 /**
    684  * radeon_card_posted - check if the hw has already been initialized
    685  *
    686  * @rdev: radeon_device pointer
    687  *
    688  * Check if the asic has been initialized (all asics).
    689  * Used at driver startup.
    690  * Returns true if initialized or false if not.
    691  */
    692 bool radeon_card_posted(struct radeon_device *rdev)
    693 {
    694 	uint32_t reg;
    695 
    696 	/* for pass through, always force asic_init for CI */
    697 	if (rdev->family >= CHIP_BONAIRE &&
    698 	    radeon_device_is_virtual())
    699 		return false;
    700 
    701 #ifndef __NetBSD__		/* XXX radeon efi */
    702 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
    703 	if (efi_enabled(EFI_BOOT) &&
    704 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
    705 	    (rdev->family < CHIP_R600))
    706 		return false;
    707 #endif
    708 
    709 	if (ASIC_IS_NODCE(rdev))
    710 		goto check_memsize;
    711 
    712 	/* first check CRTCs */
    713 	if (ASIC_IS_DCE4(rdev)) {
    714 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
    715 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
    716 			if (rdev->num_crtc >= 4) {
    717 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
    718 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
    719 			}
    720 			if (rdev->num_crtc >= 6) {
    721 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
    722 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
    723 			}
    724 		if (reg & EVERGREEN_CRTC_MASTER_EN)
    725 			return true;
    726 	} else if (ASIC_IS_AVIVO(rdev)) {
    727 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
    728 		      RREG32(AVIVO_D2CRTC_CONTROL);
    729 		if (reg & AVIVO_CRTC_EN) {
    730 			return true;
    731 		}
    732 	} else {
    733 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
    734 		      RREG32(RADEON_CRTC2_GEN_CNTL);
    735 		if (reg & RADEON_CRTC_EN) {
    736 			return true;
    737 		}
    738 	}
    739 
    740 check_memsize:
    741 	/* then check MEM_SIZE, in case the crtcs are off */
    742 	if (rdev->family >= CHIP_R600)
    743 		reg = RREG32(R600_CONFIG_MEMSIZE);
    744 	else
    745 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
    746 
    747 	if (reg)
    748 		return true;
    749 
    750 	return false;
    751 
    752 }
    753 
    754 /**
    755  * radeon_update_bandwidth_info - update display bandwidth params
    756  *
    757  * @rdev: radeon_device pointer
    758  *
    759  * Used when sclk/mclk are switched or display modes are set.
    760  * params are used to calculate display watermarks (all asics)
    761  */
    762 void radeon_update_bandwidth_info(struct radeon_device *rdev)
    763 {
    764 	fixed20_12 a;
    765 	u32 sclk = rdev->pm.current_sclk;
    766 	u32 mclk = rdev->pm.current_mclk;
    767 
    768 	/* sclk/mclk in Mhz */
    769 	a.full = dfixed_const(100);
    770 	rdev->pm.sclk.full = dfixed_const(sclk);
    771 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
    772 	rdev->pm.mclk.full = dfixed_const(mclk);
    773 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
    774 
    775 	if (rdev->flags & RADEON_IS_IGP) {
    776 		a.full = dfixed_const(16);
    777 		/* core_bandwidth = sclk(Mhz) * 16 */
    778 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
    779 	}
    780 }
    781 
    782 /**
    783  * radeon_boot_test_post_card - check and possibly initialize the hw
    784  *
    785  * @rdev: radeon_device pointer
    786  *
    787  * Check if the asic is initialized and if not, attempt to initialize
    788  * it (all asics).
    789  * Returns true if initialized or false if not.
    790  */
    791 bool radeon_boot_test_post_card(struct radeon_device *rdev)
    792 {
    793 	if (radeon_card_posted(rdev))
    794 		return true;
    795 
    796 	if (rdev->bios) {
    797 		DRM_INFO("GPU not posted. posting now...\n");
    798 		if (rdev->is_atom_bios)
    799 			atom_asic_init(rdev->mode_info.atom_context);
    800 		else
    801 			radeon_combios_asic_init(rdev->ddev);
    802 		return true;
    803 	} else {
    804 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
    805 		return false;
    806 	}
    807 }
    808 
    809 /**
    810  * radeon_dummy_page_init - init dummy page used by the driver
    811  *
    812  * @rdev: radeon_device pointer
    813  *
    814  * Allocate the dummy page used by the driver (all asics).
    815  * This dummy page is used by the driver as a filler for gart entries
     816  * when pages are taken out of the GART.
     817  * Returns 0 on success, -ENOMEM on failure.
    818  */
    819 int radeon_dummy_page_init(struct radeon_device *rdev)
    820 {
    821 #ifdef __NetBSD__
    822 	int rsegs;
    823 	int error;
    824 
    825 	/* XXX Can this be called more than once??  */
    826 	if (rdev->dummy_page.rdp_map != NULL)
    827 		return 0;
    828 
    829 	error = bus_dmamem_alloc(rdev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
    830 	    &rdev->dummy_page.rdp_seg, 1, &rsegs, BUS_DMA_WAITOK);
    831 	if (error)
    832 		goto fail0;
    833 	KASSERT(rsegs == 1);
    834 	error = bus_dmamap_create(rdev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
    835 	    BUS_DMA_WAITOK, &rdev->dummy_page.rdp_map);
    836 	if (error)
    837 		goto fail1;
    838 	error = bus_dmamap_load_raw(rdev->ddev->dmat, rdev->dummy_page.rdp_map,
    839 	    &rdev->dummy_page.rdp_seg, 1, PAGE_SIZE, BUS_DMA_WAITOK);
    840 	if (error)
    841 		goto fail2;
    842 
    843 	/* Success!  */
    844 	rdev->dummy_page.addr = rdev->dummy_page.rdp_map->dm_segs[0].ds_addr;
    845 	rdev->dummy_page.entry = radeon_gart_get_page_entry(
    846 		rdev->dummy_page.addr, RADEON_GART_PAGE_DUMMY);
    847 	return 0;
    848 
    849 fail3: __unused
    850 	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    851 fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    852 fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
    853 fail0:	KASSERT(error);
    854 	rdev->dummy_page.rdp_map = NULL;
    855 	/* XXX errno NetBSD->Linux */
    856 	return -error;
    857 #else
    858 	if (rdev->dummy_page.page)
    859 		return 0;
    860 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
    861 	if (rdev->dummy_page.page == NULL)
    862 		return -ENOMEM;
    863 	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
    864 					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    865 	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
    866 		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
    867 		__free_page(rdev->dummy_page.page);
    868 		rdev->dummy_page.page = NULL;
    869 		return -ENOMEM;
    870 	}
    871 	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
    872 							    RADEON_GART_PAGE_DUMMY);
    873 	return 0;
    874 #endif
    875 }
    876 
    877 /**
    878  * radeon_dummy_page_fini - free dummy page used by the driver
    879  *
    880  * @rdev: radeon_device pointer
    881  *
    882  * Frees the dummy page used by the driver (all asics).
    883  */
    884 void radeon_dummy_page_fini(struct radeon_device *rdev)
    885 {
    886 #ifdef __NetBSD__
    887 
    888 	if (rdev->dummy_page.rdp_map == NULL)
    889 		return;
    890 	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    891 	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    892 	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
    893 	rdev->dummy_page.rdp_map = NULL;
    894 #else
    895 	if (rdev->dummy_page.page == NULL)
    896 		return;
    897 	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
    898 			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    899 	__free_page(rdev->dummy_page.page);
    900 	rdev->dummy_page.page = NULL;
    901 #endif
    902 }
    903 
    904 
    905 /* ATOM accessor methods */
    906 /*
    907  * ATOM is an interpreted byte code stored in tables in the vbios.  The
    908  * driver registers callbacks to access registers and the interpreter
     909  * in the driver parses the tables and executes them to program specific
    910  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
    911  * atombios.h, and atom.c
    912  */
    913 
    914 /**
    915  * cail_pll_read - read PLL register
    916  *
    917  * @info: atom card_info pointer
    918  * @reg: PLL register offset
    919  *
    920  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    921  * Returns the value of the PLL register.
    922  */
    923 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
    924 {
    925 	struct radeon_device *rdev = info->dev->dev_private;
    926 	uint32_t r;
    927 
    928 	r = rdev->pll_rreg(rdev, reg);
    929 	return r;
    930 }
    931 
    932 /**
    933  * cail_pll_write - write PLL register
    934  *
    935  * @info: atom card_info pointer
    936  * @reg: PLL register offset
    937  * @val: value to write to the pll register
    938  *
    939  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    940  */
    941 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
    942 {
    943 	struct radeon_device *rdev = info->dev->dev_private;
    944 
    945 	rdev->pll_wreg(rdev, reg, val);
    946 }
    947 
    948 /**
    949  * cail_mc_read - read MC (Memory Controller) register
    950  *
    951  * @info: atom card_info pointer
    952  * @reg: MC register offset
    953  *
    954  * Provides an MC register accessor for the atom interpreter (r4xx+).
    955  * Returns the value of the MC register.
    956  */
    957 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
    958 {
    959 	struct radeon_device *rdev = info->dev->dev_private;
    960 	uint32_t r;
    961 
    962 	r = rdev->mc_rreg(rdev, reg);
    963 	return r;
    964 }
    965 
    966 /**
    967  * cail_mc_write - write MC (Memory Controller) register
    968  *
    969  * @info: atom card_info pointer
    970  * @reg: MC register offset
     971  * @val: value to write to the MC register
     972  *
     973  * Provides an MC register accessor for the atom interpreter (r4xx+).
    974  */
    975 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
    976 {
    977 	struct radeon_device *rdev = info->dev->dev_private;
    978 
    979 	rdev->mc_wreg(rdev, reg, val);
    980 }
    981 
    982 /**
    983  * cail_reg_write - write MMIO register
    984  *
    985  * @info: atom card_info pointer
    986  * @reg: MMIO register offset
     987  * @val: value to write to the MMIO register
    988  *
    989  * Provides a MMIO register accessor for the atom interpreter (r4xx+).
    990  */
    991 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
    992 {
    993 	struct radeon_device *rdev = info->dev->dev_private;
    994 
    995 	WREG32(reg*4, val);
    996 }
    997 
    998 /**
    999  * cail_reg_read - read MMIO register
   1000  *
   1001  * @info: atom card_info pointer
   1002  * @reg: MMIO register offset
   1003  *
   1004  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
   1005  * Returns the value of the MMIO register.
   1006  */
   1007 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
   1008 {
   1009 	struct radeon_device *rdev = info->dev->dev_private;
   1010 	uint32_t r;
   1011 
   1012 	r = RREG32(reg*4);
   1013 	return r;
   1014 }
   1015 
   1016 /**
   1017  * cail_ioreg_write - write IO register
   1018  *
   1019  * @info: atom card_info pointer
   1020  * @reg: IO register offset
    1021  * @val: value to write to the IO register
    1022  *
    1023  * Provides an IO register accessor for the atom interpreter (r4xx+).
   1024  */
   1025 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
   1026 {
   1027 	struct radeon_device *rdev = info->dev->dev_private;
   1028 
   1029 	WREG32_IO(reg*4, val);
   1030 }
   1031 
   1032 /**
   1033  * cail_ioreg_read - read IO register
   1034  *
   1035  * @info: atom card_info pointer
   1036  * @reg: IO register offset
   1037  *
   1038  * Provides an IO register accessor for the atom interpreter (r4xx+).
   1039  * Returns the value of the IO register.
   1040  */
   1041 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
   1042 {
   1043 	struct radeon_device *rdev = info->dev->dev_private;
   1044 	uint32_t r;
   1045 
   1046 	r = RREG32_IO(reg*4);
   1047 	return r;
   1048 }
   1049 
   1050 /**
   1051  * radeon_atombios_init - init the driver info and callbacks for atombios
   1052  *
   1053  * @rdev: radeon_device pointer
   1054  *
   1055  * Initializes the driver info and register access callbacks for the
   1056  * ATOM interpreter (r4xx+).
    1057  * Returns 0 on success, -ENOMEM on failure.
   1058  * Called at driver startup.
   1059  */
   1060 int radeon_atombios_init(struct radeon_device *rdev)
   1061 {
   1062 	struct card_info *atom_card_info =
   1063 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
   1064 
   1065 	if (!atom_card_info)
   1066 		return -ENOMEM;
   1067 
   1068 	rdev->mode_info.atom_card_info = atom_card_info;
   1069 	atom_card_info->dev = rdev->ddev;
   1070 	atom_card_info->reg_read = cail_reg_read;
   1071 	atom_card_info->reg_write = cail_reg_write;
   1072 	/* needed for iio ops */
   1073 #ifdef __NetBSD__
   1074 	if (rdev->rio_mem_size)
   1075 #else
   1076 	if (rdev->rio_mem)
   1077 #endif
   1078 	{
   1079 		atom_card_info->ioreg_read = cail_ioreg_read;
   1080 		atom_card_info->ioreg_write = cail_ioreg_write;
   1081 	} else {
   1082 		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
   1083 		atom_card_info->ioreg_read = cail_reg_read;
   1084 		atom_card_info->ioreg_write = cail_reg_write;
   1085 	}
   1086 	atom_card_info->mc_read = cail_mc_read;
   1087 	atom_card_info->mc_write = cail_mc_write;
   1088 	atom_card_info->pll_read = cail_pll_read;
   1089 	atom_card_info->pll_write = cail_pll_write;
   1090 
   1091 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
   1092 	if (!rdev->mode_info.atom_context) {
   1093 		radeon_atombios_fini(rdev);
   1094 		return -ENOMEM;
   1095 	}
   1096 
   1097 #ifdef __NetBSD__
   1098 	linux_mutex_init(&rdev->mode_info.atom_context->mutex);
   1099 	linux_mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
   1100 #else
   1101 	mutex_init(&rdev->mode_info.atom_context->mutex);
   1102 	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
   1103 #endif
   1104 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
   1105 	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
   1106 	return 0;
   1107 }
   1108 
   1109 /**
   1110  * radeon_atombios_fini - free the driver info and callbacks for atombios
   1111  *
   1112  * @rdev: radeon_device pointer
   1113  *
   1114  * Frees the driver info and register access callbacks for the ATOM
   1115  * interpreter (r4xx+).
   1116  * Called at driver shutdown.
   1117  */
   1118 void radeon_atombios_fini(struct radeon_device *rdev)
   1119 {
   1120 	if (rdev->mode_info.atom_context) {
   1121 #ifdef __NetBSD__
   1122 		linux_mutex_destroy(&rdev->mode_info.atom_context->scratch_mutex);
   1123 		linux_mutex_destroy(&rdev->mode_info.atom_context->mutex);
   1124 #else
   1125 		mutex_destroy(&rdev->mode_info.atom_context->scratch_mutex);
   1126 		mutex_destroy(&rdev->mode_info.atom_context->mutex);
   1127 #endif
   1128 		kfree(rdev->mode_info.atom_context->scratch);
   1129 	}
   1130 	kfree(rdev->mode_info.atom_context);
   1131 	rdev->mode_info.atom_context = NULL;
   1132 	kfree(rdev->mode_info.atom_card_info);
   1133 	rdev->mode_info.atom_card_info = NULL;
   1134 }
   1135 
   1136 /* COMBIOS */
   1137 /*
   1138  * COMBIOS is the bios format prior to ATOM. It provides
   1139  * command tables similar to ATOM, but doesn't have a unified
   1140  * parser.  See radeon_combios.c
   1141  */
   1142 
   1143 /**
   1144  * radeon_combios_init - init the driver info for combios
   1145  *
   1146  * @rdev: radeon_device pointer
   1147  *
   1148  * Initializes the driver info for combios (r1xx-r3xx).
    1149  * Returns 0 on success.
   1150  * Called at driver startup.
   1151  */
   1152 int radeon_combios_init(struct radeon_device *rdev)
   1153 {
   1154 	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
   1155 	return 0;
   1156 }
   1157 
   1158 /**
   1159  * radeon_combios_fini - free the driver info for combios
   1160  *
   1161  * @rdev: radeon_device pointer
   1162  *
   1163  * Frees the driver info for combios (r1xx-r3xx).
   1164  * Called at driver shutdown.
   1165  */
   1166 void radeon_combios_fini(struct radeon_device *rdev)
   1167 {
   1168 }
   1169 
   1170 #ifndef __NetBSD__		/* XXX radeon vga */
   1171 /* if we get transitioned to only one device, take VGA back */
   1172 /**
   1173  * radeon_vga_set_decode - enable/disable vga decode
   1174  *
   1175  * @cookie: radeon_device pointer
   1176  * @state: enable/disable vga decode
   1177  *
   1178  * Enable/disable vga decode (all asics).
   1179  * Returns VGA resource flags.
   1180  */
   1181 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
   1182 {
   1183 	struct radeon_device *rdev = cookie;
   1184 	radeon_vga_set_state(rdev, state);
   1185 	if (state)
   1186 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
   1187 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1188 	else
   1189 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1190 }
   1191 #endif
   1192 
   1193 /**
   1194  * radeon_check_pot_argument - check that argument is a power of two
   1195  *
   1196  * @arg: value to check
   1197  *
   1198  * Validates that a certain argument is a power of two (all asics).
   1199  * Returns true if argument is valid.
   1200  */
   1201 static bool radeon_check_pot_argument(int arg)
   1202 {
   1203 	return (arg & (arg - 1)) == 0;
   1204 }
   1205 
   1206 /**
    1207  * radeon_gart_size_auto - determine a sensible default GART size according to ASIC family
    1208  *
    1209  * @family: ASIC family name
   1210  */
   1211 static int radeon_gart_size_auto(enum radeon_family family)
   1212 {
   1213 	/* default to a larger gart size on newer asics */
   1214 	if (family >= CHIP_TAHITI)
   1215 		return 2048;
   1216 	else if (family >= CHIP_RV770)
   1217 		return 1024;
   1218 	else
   1219 		return 512;
   1220 }
   1221 
   1222 /**
   1223  * radeon_check_arguments - validate module params
   1224  *
   1225  * @rdev: radeon_device pointer
   1226  *
   1227  * Validates certain module parameters and updates
   1228  * the associated values used by the driver (all asics).
   1229  */
   1230 static void radeon_check_arguments(struct radeon_device *rdev)
   1231 {
   1232 	/* vramlimit must be a power of two */
   1233 	if (!radeon_check_pot_argument(radeon_vram_limit)) {
   1234 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
   1235 				radeon_vram_limit);
   1236 		radeon_vram_limit = 0;
   1237 	}
   1238 
   1239 	if (radeon_gart_size == -1) {
   1240 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1241 	}
   1242 	/* gtt size must be power of two and greater or equal to 32M */
   1243 	if (radeon_gart_size < 32) {
   1244 		dev_warn(rdev->dev, "gart size (%d) too small\n",
   1245 				radeon_gart_size);
   1246 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1247 	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
   1248 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
   1249 				radeon_gart_size);
   1250 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
   1251 	}
   1252 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
   1253 
    1254 	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
   1255 	switch (radeon_agpmode) {
   1256 	case -1:
   1257 	case 0:
   1258 	case 1:
   1259 	case 2:
   1260 	case 4:
   1261 	case 8:
   1262 		break;
   1263 	default:
   1264 		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
   1265 				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
   1266 		radeon_agpmode = 0;
   1267 		break;
   1268 	}
   1269 
   1270 	if (!radeon_check_pot_argument(radeon_vm_size)) {
   1271 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
   1272 			 radeon_vm_size);
   1273 		radeon_vm_size = 4;
   1274 	}
   1275 
   1276 	if (radeon_vm_size < 1) {
    1277 		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
   1278 			 radeon_vm_size);
   1279 		radeon_vm_size = 4;
   1280 	}
   1281 
    1282 	/*
    1283 	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
    1284 	 */
   1285 	if (radeon_vm_size > 1024) {
   1286 		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
   1287 			 radeon_vm_size);
   1288 		radeon_vm_size = 4;
   1289 	}
   1290 
   1291 	/* defines number of bits in page table versus page directory,
   1292 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
   1293 	 * page table and the remaining bits are in the page directory */
   1294 	if (radeon_vm_block_size == -1) {
   1295 
   1296 		/* Total bits covered by PD + PTs */
   1297 		unsigned bits = ilog2(radeon_vm_size) + 18;
   1298 
   1299 		/* Make sure the PD is 4K in size up to 8GB address space.
   1300 		   Above that split equal between PD and PTs */
   1301 		if (radeon_vm_size <= 8)
   1302 			radeon_vm_block_size = bits - 9;
   1303 		else
   1304 			radeon_vm_block_size = (bits + 3) / 2;
   1305 
   1306 	} else if (radeon_vm_block_size < 9) {
   1307 		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
   1308 			 radeon_vm_block_size);
   1309 		radeon_vm_block_size = 9;
   1310 	}
   1311 
   1312 	if (radeon_vm_block_size > 24 ||
   1313 	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
   1314 		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
   1315 			 radeon_vm_block_size);
   1316 		radeon_vm_block_size = 9;
   1317 	}
   1318 }
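/*
 * Worked example for the block-size heuristic above: with radeon_vm_size = 8
 * (GB), bits = ilog2(8) + 18 = 21; since the size is <= 8 GB,
 * radeon_vm_block_size = 21 - 9 = 12, which leaves 9 bits for the page
 * directory (512 entries, i.e. the 4K PD the comment asks for).  For a
 * hypothetical radeon_vm_size = 1024 (1 TB), bits = 28 and the split becomes
 * (28 + 3) / 2 = 15.
 */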
   1319 
   1320 #ifndef __NetBSD__		/* XXX radeon vga */
   1321 /**
   1322  * radeon_switcheroo_set_state - set switcheroo state
   1323  *
   1324  * @pdev: pci dev pointer
   1325  * @state: vga_switcheroo state
   1326  *
   1327  * Callback for the switcheroo driver.  Suspends or resumes the
   1328  * the asics before or after it is powered up using ACPI methods.
   1329  */
   1330 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
   1331 {
   1332 	struct drm_device *dev = pci_get_drvdata(pdev);
   1333 	struct radeon_device *rdev = dev->dev_private;
   1334 
   1335 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
   1336 		return;
   1337 
   1338 	if (state == VGA_SWITCHEROO_ON) {
   1339 		unsigned d3_delay = dev->pdev->d3_delay;
   1340 
   1341 		printk(KERN_INFO "radeon: switched on\n");
   1342 		/* don't suspend or resume card normally */
   1343 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1344 
   1345 		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
   1346 			dev->pdev->d3_delay = 20;
   1347 
   1348 		radeon_resume_kms(dev, true, true);
   1349 
   1350 		dev->pdev->d3_delay = d3_delay;
   1351 
   1352 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
   1353 		drm_kms_helper_poll_enable(dev);
   1354 	} else {
   1355 		printk(KERN_INFO "radeon: switched off\n");
   1356 		drm_kms_helper_poll_disable(dev);
   1357 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1358 		radeon_suspend_kms(dev, true, true);
   1359 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
   1360 	}
   1361 }
   1362 
   1363 /**
   1364  * radeon_switcheroo_can_switch - see if switcheroo state can change
   1365  *
   1366  * @pdev: pci dev pointer
   1367  *
    1368  * Callback for the switcheroo driver.  Checks if the switcheroo
    1369  * state can be changed.
   1370  * Returns true if the state can be changed, false if not.
   1371  */
   1372 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
   1373 {
   1374 	struct drm_device *dev = pci_get_drvdata(pdev);
   1375 
   1376 	/*
   1377 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
   1378 	 * locking inversion with the driver load path. And the access here is
   1379 	 * completely racy anyway. So don't bother with locking for now.
   1380 	 */
   1381 	return dev->open_count == 0;
   1382 }
   1383 
   1384 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
   1385 	.set_gpu_state = radeon_switcheroo_set_state,
   1386 	.reprobe = NULL,
   1387 	.can_switch = radeon_switcheroo_can_switch,
   1388 };
   1389 #endif
   1390 
   1391 /**
   1392  * radeon_device_init - initialize the driver
   1393  *
   1394  * @rdev: radeon_device pointer
    1395  * @ddev: drm dev pointer
   1396  * @pdev: pci dev pointer
   1397  * @flags: driver flags
   1398  *
   1399  * Initializes the driver info and hw (all asics).
   1400  * Returns 0 for success or an error on failure.
   1401  * Called at driver startup.
   1402  */
   1403 int radeon_device_init(struct radeon_device *rdev,
   1404 		       struct drm_device *ddev,
   1405 		       struct pci_dev *pdev,
   1406 		       uint32_t flags)
   1407 {
   1408 	int r, i;
   1409 	int dma_bits;
   1410 #ifndef __NetBSD__
   1411 	bool runtime = false;
   1412 #endif
   1413 
   1414 	rdev->shutdown = false;
   1415 	rdev->dev = ddev->dev;
   1416 	rdev->ddev = ddev;
   1417 	rdev->pdev = pdev;
   1418 	rdev->flags = flags;
   1419 	rdev->family = flags & RADEON_FAMILY_MASK;
   1420 	rdev->is_atom_bios = false;
   1421 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
   1422 	rdev->mc.gtt_size = 512 * 1024 * 1024;
   1423 	rdev->accel_working = false;
   1424 	/* set up ring ids */
   1425 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1426 		rdev->ring[i].idx = i;
   1427 	}
   1428 	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
   1429 
   1430 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
   1431 		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
   1432 		pdev->subsystem_vendor, pdev->subsystem_device);
   1433 
    1434 	/* mutex initializations are all done here so we
    1435 	 * can recall functions without having locking issues */
   1436 #ifdef __NetBSD__
   1437 	linux_mutex_init(&rdev->ring_lock);
   1438 	linux_mutex_init(&rdev->dc_hw_i2c_mutex);
   1439 #else
   1440 	mutex_init(&rdev->ring_lock);
   1441 	mutex_init(&rdev->dc_hw_i2c_mutex);
   1442 #endif
   1443 	atomic_set(&rdev->ih.lock, 0);
   1444 #ifdef __NetBSD__
   1445 	linux_mutex_init(&rdev->gem.mutex);
   1446 	linux_mutex_init(&rdev->pm.mutex);
   1447 	linux_mutex_init(&rdev->gpu_clock_mutex);
   1448 	linux_mutex_init(&rdev->srbm_mutex);
   1449 	linux_mutex_init(&rdev->grbm_idx_mutex);
   1450 #else
   1451 	mutex_init(&rdev->gem.mutex);
   1452 	mutex_init(&rdev->pm.mutex);
   1453 	mutex_init(&rdev->gpu_clock_mutex);
   1454 	mutex_init(&rdev->srbm_mutex);
   1455 	mutex_init(&rdev->grbm_idx_mutex);
   1456 #endif
   1457 	init_rwsem(&rdev->pm.mclk_lock);
   1458 	init_rwsem(&rdev->exclusive_lock);
   1459 #ifdef __NetBSD__
   1460 	spin_lock_init(&rdev->irq.vblank_lock);
   1461 	DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
   1462 	linux_mutex_init(&rdev->mn_lock);
   1463 #else
   1464 	init_waitqueue_head(&rdev->irq.vblank_queue);
   1465 	mutex_init(&rdev->mn_lock);
   1466 #endif
   1467 	hash_init(rdev->mn_hash);
   1468 	r = radeon_gem_init(rdev);
   1469 	if (r)
   1470 		return r;
   1471 
   1472 	radeon_check_arguments(rdev);
   1473 	/* Adjust VM size here.
   1474 	 * Max GPUVM size for cayman+ is 40 bits.
   1475 	 */
   1476 	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
   1477 
   1478 	/* Set asic functions */
   1479 	r = radeon_asic_init(rdev);
   1480 	if (r)
   1481 		return r;
   1482 
    1483 	/* All of the newer IGP chips have an internal GART.
    1484 	 * However, some rs4xx chips report as AGP, so remove that flag here.
   1485 	 */
   1486 	if ((rdev->family >= CHIP_RS400) &&
   1487 	    (rdev->flags & RADEON_IS_IGP)) {
   1488 		rdev->flags &= ~RADEON_IS_AGP;
   1489 	}
   1490 
   1491 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
   1492 		radeon_agp_disable(rdev);
   1493 	}
   1494 
   1495 	/* Set the internal MC address mask
   1496 	 * This is the max address of the GPU's
   1497 	 * internal address space.
   1498 	 */
   1499 	if (rdev->family >= CHIP_CAYMAN)
   1500 		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
   1501 	else if (rdev->family >= CHIP_CEDAR)
   1502 		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
   1503 	else
   1504 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
   1505 
   1506 	/* set DMA mask + need_dma32 flags.
   1507 	 * PCIE - can handle 40-bits.
   1508 	 * IGP - can handle 40-bits
   1509 	 * AGP - generally dma32 is safest
   1510 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
   1511 	 */
   1512 	rdev->need_dma32 = false;
   1513 	if (rdev->flags & RADEON_IS_AGP)
   1514 		rdev->need_dma32 = true;
   1515 	if ((rdev->flags & RADEON_IS_PCI) &&
   1516 	    (rdev->family <= CHIP_RS740))
   1517 		rdev->need_dma32 = true;
   1518 
   1519 	dma_bits = rdev->need_dma32 ? 32 : 40;
   1520 #ifdef __NetBSD__
   1521 	r = drm_limit_dma_space(rdev->ddev, 0, __BITS(dma_bits - 1, 0));
   1522 	if (r)
   1523 		DRM_ERROR("No suitable DMA available.\n");
   1524 #else
   1525 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
   1526 	if (r) {
   1527 		rdev->need_dma32 = true;
   1528 		dma_bits = 32;
   1529 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
   1530 	}
   1531 	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
   1532 	if (r) {
   1533 		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
   1534 		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
   1535 	}
   1536 #endif
   1537 
   1538 	/* Registers mapping */
   1539 	/* TODO: block userspace mapping of io register */
   1540 	/* XXX Destroy these locks on detach...  */
   1541 	spin_lock_init(&rdev->mmio_idx_lock);
   1542 	spin_lock_init(&rdev->smc_idx_lock);
   1543 	spin_lock_init(&rdev->pll_idx_lock);
   1544 	spin_lock_init(&rdev->mc_idx_lock);
   1545 	spin_lock_init(&rdev->pcie_idx_lock);
   1546 	spin_lock_init(&rdev->pciep_idx_lock);
   1547 	spin_lock_init(&rdev->pif_idx_lock);
   1548 	spin_lock_init(&rdev->cg_idx_lock);
   1549 	spin_lock_init(&rdev->uvd_idx_lock);
   1550 	spin_lock_init(&rdev->rcu_idx_lock);
   1551 	spin_lock_init(&rdev->didt_idx_lock);
   1552 	spin_lock_init(&rdev->end_idx_lock);
   1553 #ifdef __NetBSD__
   1554     {
   1555 	pcireg_t bar;
   1556 
   1557 	if (rdev->family >= CHIP_BONAIRE)
   1558 		bar = 5;
   1559 	else
   1560 		bar = 2;
   1561 	if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(bar),
   1562 		pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
   1563 		    rdev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
   1564 		0,
   1565 		&rdev->rmmio_bst, &rdev->rmmio_bsh,
   1566 		&rdev->rmmio_addr, &rdev->rmmio_size))
   1567 		return -EIO;
   1568     }
   1569 	DRM_INFO("register mmio base: 0x%"PRIxMAX"\n",
   1570 	    (uintmax_t)rdev->rmmio_addr);
   1571 	DRM_INFO("register mmio size: %"PRIuMAX"\n",
   1572 	    (uintmax_t)rdev->rmmio_size);
   1573 #else
   1574 	if (rdev->family >= CHIP_BONAIRE) {
   1575 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
   1576 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
   1577 	} else {
   1578 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
   1579 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
   1580 	}
   1581 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
   1582 	if (rdev->rmmio == NULL) {
   1583 		return -ENOMEM;
   1584 	}
   1585 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
   1586 	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
   1587 #endif
   1588 
   1589 	/* doorbell bar mapping */
   1590 	if (rdev->family >= CHIP_BONAIRE)
   1591 		radeon_doorbell_init(rdev);
   1592 
   1593 	/* io port mapping */
   1594 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
   1595 #ifdef __NetBSD__
   1596 		if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(i),
   1597 			PCI_MAPREG_TYPE_IO, 0,
   1598 			&rdev->rio_mem_bst, &rdev->rio_mem_bsh,
   1599 			NULL, &rdev->rio_mem_size))
   1600 			continue;
   1601 		break;
   1602 #else
   1603 		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
   1604 			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
   1605 			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
   1606 			break;
   1607 		}
   1608 #endif
   1609 	}
   1610 #ifdef __NetBSD__
   1611 	if (i == DEVICE_COUNT_RESOURCE)
   1612 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1613 #else
   1614 	if (rdev->rio_mem == NULL)
   1615 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1616 #endif
   1617 
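         	/* Apply PX (hybrid graphics) quirks for known problem systems. */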
   1618 	if (rdev->flags & RADEON_IS_PX)
   1619 		radeon_device_handle_px_quirks(rdev);
   1620 
   1621 #ifndef __NetBSD__		/* XXX radeon vga */
    1622 	/* if we have more than one VGA card, disable the radeon VGA resources */
    1623 	/* this will fail for cards that aren't VGA class devices; just
    1624 	 * ignore it */
   1625 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
   1626 
   1627 	if (rdev->flags & RADEON_IS_PX)
   1628 		runtime = true;
   1629 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
   1630 	if (runtime)
   1631 		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
   1632 #endif
   1633 
   1634 	r = radeon_init(rdev);
   1635 	if (r)
   1636 		goto failed;
   1637 
   1638 	r = radeon_gem_debugfs_init(rdev);
   1639 	if (r) {
   1640 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
   1641 	}
   1642 
   1643 	r = radeon_mst_debugfs_init(rdev);
   1644 	if (r) {
   1645 		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
   1646 	}
   1647 
   1648 	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
    1649 		/* Acceleration is not working on this AGP card; try
    1650 		 * again with a fallback to the PCI or PCIE GART.
    1651 		 */
   1652 		radeon_asic_reset(rdev);
   1653 		radeon_fini(rdev);
   1654 		radeon_agp_disable(rdev);
   1655 		r = radeon_init(rdev);
   1656 		if (r)
   1657 			goto failed;
   1658 	}
   1659 
   1660 	r = radeon_ib_ring_tests(rdev);
   1661 	if (r)
   1662 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1663 
   1664 	/*
    1665 	 * A Turks/Thames GPU will freeze the whole laptop if DPM is not
    1666 	 * restarted after the CP ring has chewed through at least one packet.
    1667 	 * Hence we stop and restart DPM after radeon_ib_ring_tests().
   1668 	 */
   1669 	if (rdev->pm.dpm_enabled &&
   1670 	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
   1671 	    (rdev->family == CHIP_TURKS) &&
   1672 	    (rdev->flags & RADEON_IS_MOBILITY)) {
   1673 		mutex_lock(&rdev->pm.mutex);
   1674 		radeon_dpm_disable(rdev);
   1675 		radeon_dpm_enable(rdev);
   1676 		mutex_unlock(&rdev->pm.mutex);
   1677 	}
   1678 
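         	/* Optional self-tests and benchmarks, selected with the
         	 * radeon_testing and radeon_benchmarking module parameters. */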
   1679 	if ((radeon_testing & 1)) {
   1680 		if (rdev->accel_working)
   1681 			radeon_test_moves(rdev);
   1682 		else
   1683 			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
   1684 	}
   1685 	if ((radeon_testing & 2)) {
   1686 		if (rdev->accel_working)
   1687 			radeon_test_syncing(rdev);
   1688 		else
   1689 			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
   1690 	}
   1691 	if (radeon_benchmarking) {
   1692 		if (rdev->accel_working)
   1693 			radeon_benchmark(rdev, radeon_benchmarking);
   1694 		else
   1695 			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
   1696 	}
   1697 	return 0;
   1698 
   1699 failed:
   1700 #ifndef __NetBSD__		/* XXX radeon vga */
   1701 	if (runtime)
   1702 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
   1703 #endif
   1704 	return r;
   1705 }
   1706 
   1707 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
   1708 
   1709 /**
   1710  * radeon_device_fini - tear down the driver
   1711  *
   1712  * @rdev: radeon_device pointer
   1713  *
   1714  * Tear down the driver info (all asics).
   1715  * Called at driver shutdown.
   1716  */
   1717 void radeon_device_fini(struct radeon_device *rdev)
   1718 {
   1719 	DRM_INFO("radeon: finishing device.\n");
   1720 	rdev->shutdown = true;
   1721 	/* evict vram memory */
   1722 	radeon_bo_evict_vram(rdev);
   1723 	radeon_fini(rdev);
   1724 #ifndef __NetBSD__
   1725 	vga_switcheroo_unregister_client(rdev->pdev);
   1726 	if (rdev->flags & RADEON_IS_PX)
   1727 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
   1728 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
   1729 #endif
   1730 #ifdef __NetBSD__
   1731 	if (rdev->rio_mem_size)
   1732 		bus_space_unmap(rdev->rio_mem_bst, rdev->rio_mem_bsh,
   1733 		    rdev->rio_mem_size);
   1734 	rdev->rio_mem_size = 0;
   1735 	bus_space_unmap(rdev->rmmio_bst, rdev->rmmio_bsh, rdev->rmmio_size);
   1736 #else
   1737 	if (rdev->rio_mem)
   1738 		pci_iounmap(rdev->pdev, rdev->rio_mem);
   1739 	rdev->rio_mem = NULL;
   1740 	iounmap(rdev->rmmio);
   1741 	rdev->rmmio = NULL;
   1742 #endif
   1743 	if (rdev->family >= CHIP_BONAIRE)
   1744 		radeon_doorbell_fini(rdev);
   1745 	radeon_debugfs_remove_files(rdev);
   1746 
   1747 #ifdef __NetBSD__
   1748 	DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
   1749 	spin_lock_destroy(&rdev->irq.vblank_lock);
   1750 	destroy_rwsem(&rdev->exclusive_lock);
   1751 	destroy_rwsem(&rdev->pm.mclk_lock);
   1752 	linux_mutex_destroy(&rdev->srbm_mutex);
   1753 	linux_mutex_destroy(&rdev->gpu_clock_mutex);
   1754 	linux_mutex_destroy(&rdev->pm.mutex);
   1755 	linux_mutex_destroy(&rdev->gem.mutex);
   1756 	linux_mutex_destroy(&rdev->dc_hw_i2c_mutex);
   1757 	linux_mutex_destroy(&rdev->ring_lock);
   1758 #else
   1759 	mutex_destroy(&rdev->srbm_mutex);
   1760 	mutex_destroy(&rdev->gpu_clock_mutex);
   1761 	mutex_destroy(&rdev->pm.mutex);
   1762 	mutex_destroy(&rdev->gem.mutex);
   1763 	mutex_destroy(&rdev->dc_hw_i2c_mutex);
   1764 	mutex_destroy(&rdev->ring_lock);
   1765 #endif
   1766 }
   1767 
   1768 
   1769 /*
   1770  * Suspend & resume.
   1771  */
   1772 /**
   1773  * radeon_suspend_kms - initiate device suspend
   1774  *
    1775  * @dev: drm dev pointer
    1776  * @suspend: true to also power down the PCI device
          * @fbcon: true to suspend the fbdev console as well
   1777  *
   1778  * Puts the hw in the suspend state (all asics).
   1779  * Returns 0 for success or an error on failure.
   1780  * Called at driver suspend.
   1781  */
   1782 int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
   1783 {
   1784 	struct radeon_device *rdev;
   1785 	struct drm_crtc *crtc;
   1786 	struct drm_connector *connector;
   1787 	int i, r;
   1788 
   1789 	if (dev == NULL || dev->dev_private == NULL) {
   1790 		return -ENODEV;
   1791 	}
   1792 
   1793 	rdev = dev->dev_private;
   1794 
   1795 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1796 		return 0;
   1797 
   1798 	drm_kms_helper_poll_disable(dev);
   1799 
   1800 	drm_modeset_lock_all(dev);
   1801 	/* turn off display hw */
   1802 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1803 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
   1804 	}
   1805 	drm_modeset_unlock_all(dev);
   1806 
   1807 	/* unpin the front buffers and cursors */
   1808 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1809 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1810 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
   1811 		struct radeon_bo *robj;
   1812 
   1813 		if (radeon_crtc->cursor_bo) {
   1814 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1815 			r = radeon_bo_reserve(robj, false);
   1816 			if (r == 0) {
   1817 				radeon_bo_unpin(robj);
   1818 				radeon_bo_unreserve(robj);
   1819 			}
   1820 		}
   1821 
   1822 		if (rfb == NULL || rfb->obj == NULL) {
   1823 			continue;
   1824 		}
   1825 		robj = gem_to_radeon_bo(rfb->obj);
   1826 		/* don't unpin kernel fb objects */
   1827 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
   1828 			r = radeon_bo_reserve(robj, false);
   1829 			if (r == 0) {
   1830 				radeon_bo_unpin(robj);
   1831 				radeon_bo_unreserve(robj);
   1832 			}
   1833 		}
   1834 	}
   1835 	/* evict vram memory */
   1836 	radeon_bo_evict_vram(rdev);
   1837 
   1838 	/* wait for gpu to finish processing current batch */
   1839 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1840 		r = radeon_fence_wait_empty(rdev, i);
   1841 		if (r) {
   1842 			/* delay GPU reset to resume */
   1843 			radeon_fence_driver_force_completion(rdev, i);
   1844 		}
   1845 	}
   1846 
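         	/* Preserve the BIOS scratch registers so they can be restored on
         	 * resume. */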
   1847 	radeon_save_bios_scratch_regs(rdev);
   1848 
   1849 	radeon_suspend(rdev);
   1850 	radeon_hpd_fini(rdev);
   1851 	/* evict remaining vram memory */
   1852 	radeon_bo_evict_vram(rdev);
   1853 
   1854 	radeon_agp_suspend(rdev);
   1855 
   1856 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1857 	pci_save_state(dev->pdev);
   1858 	if (suspend) {
   1859 		/* Shut down the device */
   1860 		pci_disable_device(dev->pdev);
   1861 		pci_set_power_state(dev->pdev, PCI_D3hot);
   1862 	}
   1863 #endif
   1864 
   1865 #ifndef __NetBSD__		/* XXX radeon fb */
   1866 	if (fbcon) {
   1867 		console_lock();
   1868 		radeon_fbdev_set_suspend(rdev, 1);
   1869 		console_unlock();
   1870 	}
   1871 #endif
   1872 	return 0;
   1873 }
   1874 
   1875 /**
   1876  * radeon_resume_kms - initiate device resume
   1877  *
    1878  * @dev: drm dev pointer
          * @resume: true to restore PCI state and re-enable the device first
          * @fbcon: true to resume the fbdev console and restore the display mode
   1879  *
   1880  * Bring the hw back to operating state (all asics).
   1881  * Returns 0 for success or an error on failure.
   1882  * Called at driver resume.
   1883  */
   1884 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
   1885 {
   1886 	struct drm_connector *connector;
   1887 	struct radeon_device *rdev = dev->dev_private;
   1888 	struct drm_crtc *crtc;
   1889 	int r;
   1890 
   1891 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1892 		return 0;
   1893 
   1894 #ifndef __NetBSD__		/* XXX radeon fb */
   1895 	if (fbcon) {
   1896 		console_lock();
   1897 	}
   1898 #endif
   1899 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1900 	if (resume) {
   1901 		pci_set_power_state(dev->pdev, PCI_D0);
   1902 		pci_restore_state(dev->pdev);
   1903 		if (pci_enable_device(dev->pdev)) {
   1904 			if (fbcon)
   1905 				console_unlock();
   1906 			return -1;
   1907 		}
   1908 	}
   1909 #endif
   1910 	/* resume AGP if in use */
   1911 	radeon_agp_resume(rdev);
   1912 	radeon_resume(rdev);
   1913 
   1914 	r = radeon_ib_ring_tests(rdev);
   1915 	if (r)
   1916 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1917 
   1918 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   1919 		/* do dpm late init */
   1920 		r = radeon_pm_late_init(rdev);
   1921 		if (r) {
   1922 			rdev->pm.dpm_enabled = false;
   1923 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1924 		}
   1925 	} else {
   1926 		/* resume old pm late */
   1927 		radeon_pm_resume(rdev);
   1928 	}
   1929 
   1930 	radeon_restore_bios_scratch_regs(rdev);
   1931 
   1932 	/* pin cursors */
   1933 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1934 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
   1935 
   1936 		if (radeon_crtc->cursor_bo) {
   1937 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
   1938 			r = radeon_bo_reserve(robj, false);
   1939 			if (r == 0) {
   1940 				/* Only 27 bit offset for legacy cursor */
   1941 				r = radeon_bo_pin_restricted(robj,
   1942 							     RADEON_GEM_DOMAIN_VRAM,
   1943 							     ASIC_IS_AVIVO(rdev) ?
   1944 							     0 : 1 << 27,
   1945 							     &radeon_crtc->cursor_addr);
   1946 				if (r != 0)
   1947 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
   1948 				radeon_bo_unreserve(robj);
   1949 			}
   1950 		}
   1951 	}
   1952 
   1953 	/* init dig PHYs, disp eng pll */
   1954 	if (rdev->is_atom_bios) {
   1955 		radeon_atom_encoder_init(rdev);
   1956 		radeon_atom_disp_eng_pll_init(rdev);
   1957 		/* turn on the BL */
   1958 		if (rdev->mode_info.bl_encoder) {
   1959 			u8 bl_level = radeon_get_backlight_level(rdev,
   1960 								 rdev->mode_info.bl_encoder);
   1961 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   1962 						   bl_level);
   1963 		}
   1964 	}
   1965 	/* reset hpd state */
   1966 	radeon_hpd_init(rdev);
   1967 	/* blat the mode back in */
   1968 	if (fbcon) {
   1969 		drm_helper_resume_force_mode(dev);
   1970 		/* turn on display hw */
   1971 		drm_modeset_lock_all(dev);
   1972 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1973 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
   1974 		}
   1975 		drm_modeset_unlock_all(dev);
   1976 	}
   1977 
   1978 	drm_kms_helper_poll_enable(dev);
   1979 
   1980 	/* set the power state here in case we are a PX system or headless */
   1981 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   1982 		radeon_pm_compute_clocks(rdev);
   1983 
   1984 #ifndef __NetBSD__		/* XXX radeon fb */
   1985 	if (fbcon) {
   1986 		radeon_fbdev_set_suspend(rdev, 0);
   1987 		console_unlock();
   1988 	}
   1989 #endif
   1990 
   1991 	return 0;
   1992 }
   1993 
   1994 /**
   1995  * radeon_gpu_reset - reset the asic
   1996  *
   1997  * @rdev: radeon device pointer
   1998  *
    1999  * Attempt to reset the GPU if it has hung (all asics).
   2000  * Returns 0 for success or an error on failure.
   2001  */
   2002 int radeon_gpu_reset(struct radeon_device *rdev)
   2003 {
   2004 	unsigned ring_sizes[RADEON_NUM_RINGS];
   2005 	uint32_t *ring_data[RADEON_NUM_RINGS];
   2006 
   2007 	bool saved = false;
   2008 
   2009 	int i, r;
   2010 	int resched;
   2011 
   2012 	down_write(&rdev->exclusive_lock);
   2013 
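         	/* Bail out if no reset is pending (e.g. another thread already
         	 * handled it). */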
   2014 	if (!rdev->needs_reset) {
   2015 		up_write(&rdev->exclusive_lock);
   2016 		return 0;
   2017 	}
   2018 
   2019 	atomic_inc(&rdev->gpu_reset_counter);
   2020 
   2021 	radeon_save_bios_scratch_regs(rdev);
   2022 	/* block TTM */
   2023 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
   2024 	radeon_suspend(rdev);
   2025 	radeon_hpd_fini(rdev);
   2026 
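         	/* Save any commands still queued on the rings so they can be
         	 * replayed after a successful reset. */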
   2027 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   2028 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
   2029 						   &ring_data[i]);
   2030 		if (ring_sizes[i]) {
   2031 			saved = true;
   2032 			dev_info(rdev->dev, "Saved %d dwords of commands "
   2033 				 "on ring %d.\n", ring_sizes[i], i);
   2034 		}
   2035 	}
   2036 
   2037 	r = radeon_asic_reset(rdev);
   2038 	if (!r) {
   2039 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
   2040 		radeon_resume(rdev);
   2041 	}
   2042 
   2043 	radeon_restore_bios_scratch_regs(rdev);
   2044 
   2045 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   2046 		if (!r && ring_data[i]) {
   2047 			radeon_ring_restore(rdev, &rdev->ring[i],
   2048 					    ring_sizes[i], ring_data[i]);
   2049 		} else {
   2050 			radeon_fence_driver_force_completion(rdev, i);
   2051 			kfree(ring_data[i]);
   2052 		}
   2053 	}
   2054 
   2055 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   2056 		/* do dpm late init */
   2057 		r = radeon_pm_late_init(rdev);
   2058 		if (r) {
   2059 			rdev->pm.dpm_enabled = false;
   2060 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   2061 		}
   2062 	} else {
   2063 		/* resume old pm late */
   2064 		radeon_pm_resume(rdev);
   2065 	}
   2066 
   2067 	/* init dig PHYs, disp eng pll */
   2068 	if (rdev->is_atom_bios) {
   2069 		radeon_atom_encoder_init(rdev);
   2070 		radeon_atom_disp_eng_pll_init(rdev);
   2071 		/* turn on the BL */
   2072 		if (rdev->mode_info.bl_encoder) {
   2073 			u8 bl_level = radeon_get_backlight_level(rdev,
   2074 								 rdev->mode_info.bl_encoder);
   2075 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   2076 						   bl_level);
   2077 		}
   2078 	}
   2079 	/* reset hpd state */
   2080 	radeon_hpd_init(rdev);
   2081 
   2082 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
   2083 
   2084 	rdev->in_reset = true;
   2085 	rdev->needs_reset = false;
   2086 
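         	/* Downgrade the exclusive (write) hold to a shared (read) hold so
         	 * blocked readers can make progress while reset finishes up. */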
   2087 	downgrade_write(&rdev->exclusive_lock);
   2088 
   2089 	drm_helper_resume_force_mode(rdev->ddev);
   2090 
   2091 	/* set the power state here in case we are a PX system or headless */
   2092 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   2093 		radeon_pm_compute_clocks(rdev);
   2094 
   2095 	if (!r) {
   2096 		r = radeon_ib_ring_tests(rdev);
   2097 		if (r && saved)
   2098 			r = -EAGAIN;
   2099 	} else {
    2100 		/* bad news; how do we tell userspace? */
   2101 		dev_info(rdev->dev, "GPU reset failed\n");
   2102 	}
   2103 
   2104 	rdev->needs_reset = r == -EAGAIN;
   2105 	rdev->in_reset = false;
   2106 
   2107 	up_read(&rdev->exclusive_lock);
   2108 	return r;
   2109 }
   2110 
   2111 
   2112 /*
   2113  * Debugfs
   2114  */
   2115 int radeon_debugfs_add_files(struct radeon_device *rdev,
   2116 			     struct drm_info_list *files,
   2117 			     unsigned nfiles)
   2118 {
   2119 	unsigned i;
   2120 
   2121 	for (i = 0; i < rdev->debugfs_count; i++) {
   2122 		if (rdev->debugfs[i].files == files) {
   2123 			/* Already registered */
   2124 			return 0;
   2125 		}
   2126 	}
   2127 
   2128 	i = rdev->debugfs_count + 1;
   2129 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
   2130 		DRM_ERROR("Reached maximum number of debugfs components.\n");
    2131 		DRM_ERROR("Report this so we can increase "
    2132 		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
   2133 		return -EINVAL;
   2134 	}
   2135 	rdev->debugfs[rdev->debugfs_count].files = files;
   2136 	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
   2137 	rdev->debugfs_count = i;
   2138 #if defined(CONFIG_DEBUG_FS)
   2139 	drm_debugfs_create_files(files, nfiles,
   2140 				 rdev->ddev->control->debugfs_root,
   2141 				 rdev->ddev->control);
   2142 	drm_debugfs_create_files(files, nfiles,
   2143 				 rdev->ddev->primary->debugfs_root,
   2144 				 rdev->ddev->primary);
   2145 #endif
   2146 	return 0;
   2147 }
   2148 
   2149 static void radeon_debugfs_remove_files(struct radeon_device *rdev)
   2150 {
   2151 #if defined(CONFIG_DEBUG_FS)
   2152 	unsigned i;
   2153 
   2154 	for (i = 0; i < rdev->debugfs_count; i++) {
   2155 		drm_debugfs_remove_files(rdev->debugfs[i].files,
   2156 					 rdev->debugfs[i].num_files,
   2157 					 rdev->ddev->control);
   2158 		drm_debugfs_remove_files(rdev->debugfs[i].files,
   2159 					 rdev->debugfs[i].num_files,
   2160 					 rdev->ddev->primary);
   2161 	}
   2162 #endif
   2163 }
   2164 
   2165 #if defined(CONFIG_DEBUG_FS)
   2166 int radeon_debugfs_init(struct drm_minor *minor)
   2167 {
   2168 	return 0;
   2169 }
   2170 
   2171 void radeon_debugfs_cleanup(struct drm_minor *minor)
   2172 {
   2173 }
   2174 #endif
   2175