radeon_device.c revision 1.3
      1 /*
      2  * Copyright 2008 Advanced Micro Devices, Inc.
      3  * Copyright 2008 Red Hat Inc.
      4  * Copyright 2009 Jerome Glisse.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  * Authors: Dave Airlie
     25  *          Alex Deucher
     26  *          Jerome Glisse
     27  */
     28 #include <linux/console.h>
     29 #include <linux/slab.h>
     30 #include <drm/drmP.h>
     31 #include <drm/drm_crtc_helper.h>
     32 #include <drm/radeon_drm.h>
     33 #include <linux/vgaarb.h>
     34 #include <linux/vga_switcheroo.h>
     35 #include <linux/efi.h>
     36 #include <linux/bitops.h>
     37 #include "radeon_reg.h"
     38 #include "radeon.h"
     39 #include "atom.h"
     40 
     41 static const char radeon_family_name[][16] = {
     42 	"R100",
     43 	"RV100",
     44 	"RS100",
     45 	"RV200",
     46 	"RS200",
     47 	"R200",
     48 	"RV250",
     49 	"RS300",
     50 	"RV280",
     51 	"R300",
     52 	"R350",
     53 	"RV350",
     54 	"RV380",
     55 	"R420",
     56 	"R423",
     57 	"RV410",
     58 	"RS400",
     59 	"RS480",
     60 	"RS600",
     61 	"RS690",
     62 	"RS740",
     63 	"RV515",
     64 	"R520",
     65 	"RV530",
     66 	"RV560",
     67 	"RV570",
     68 	"R580",
     69 	"R600",
     70 	"RV610",
     71 	"RV630",
     72 	"RV670",
     73 	"RV620",
     74 	"RV635",
     75 	"RS780",
     76 	"RS880",
     77 	"RV770",
     78 	"RV730",
     79 	"RV710",
     80 	"RV740",
     81 	"CEDAR",
     82 	"REDWOOD",
     83 	"JUNIPER",
     84 	"CYPRESS",
     85 	"HEMLOCK",
     86 	"PALM",
     87 	"SUMO",
     88 	"SUMO2",
     89 	"BARTS",
     90 	"TURKS",
     91 	"CAICOS",
     92 	"CAYMAN",
     93 	"ARUBA",
     94 	"TAHITI",
     95 	"PITCAIRN",
     96 	"VERDE",
     97 	"OLAND",
     98 	"HAINAN",
     99 	"BONAIRE",
    100 	"KAVERI",
    101 	"KABINI",
    102 	"HAWAII",
    103 	"MULLINS",
    104 	"LAST",
    105 };
    106 
    107 bool radeon_is_px(struct drm_device *dev)
    108 {
    109 	struct radeon_device *rdev = dev->dev_private;
    110 
    111 	if (rdev->flags & RADEON_IS_PX)
    112 		return true;
    113 	return false;
    114 }
    115 
    116 /**
    117  * radeon_program_register_sequence - program an array of registers.
    118  *
    119  * @rdev: radeon_device pointer
    120  * @registers: pointer to the register array
    121  * @array_size: size of the register array
    122  *
    123  * Programs an array of registers with AND and OR masks.
    124  * This is a helper for setting golden registers.
    125  */
    126 void radeon_program_register_sequence(struct radeon_device *rdev,
    127 				      const u32 *registers,
    128 				      const u32 array_size)
    129 {
    130 	u32 tmp, reg, and_mask, or_mask;
    131 	int i;
    132 
    133 	if (array_size % 3)
    134 		return;
    135 
    136 	for (i = 0; i < array_size; i += 3) {
    137 		reg = registers[i + 0];
    138 		and_mask = registers[i + 1];
    139 		or_mask = registers[i + 2];
    140 
    141 		if (and_mask == 0xffffffff) {
    142 			tmp = or_mask;
    143 		} else {
    144 			tmp = RREG32(reg);
    145 			tmp &= ~and_mask;
    146 			tmp |= or_mask;
    147 		}
    148 		WREG32(reg, tmp);
    149 	}
    150 }
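
/*
 * Illustrative sketch (not taken from this driver): a golden-register table
 * is a flat array of {offset, AND mask, OR mask} triples, so array_size must
 * be a multiple of 3.  An AND mask of 0xffffffff means "write or_mask
 * directly, skipping the read-modify-write".  The offsets below are made up.
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0xffffffff, 0x00000082,	// plain write of 0x82
 *		0x8a14, 0xf000001f, 0x00000007,	// clear masked bits, then set 0x7
 *	};
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 (u32)ARRAY_SIZE(example_golden_registers));
 */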
    151 
    152 void radeon_pci_config_reset(struct radeon_device *rdev)
    153 {
    154 	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
    155 }
    156 
    157 /**
    158  * radeon_surface_init - Clear GPU surface registers.
    159  *
    160  * @rdev: radeon_device pointer
    161  *
    162  * Clear GPU surface registers (r1xx-r5xx).
    163  */
    164 void radeon_surface_init(struct radeon_device *rdev)
    165 {
    166 	/* FIXME: check this out */
    167 	if (rdev->family < CHIP_R600) {
    168 		int i;
    169 
    170 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
    171 			if (rdev->surface_regs[i].bo)
    172 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
    173 			else
    174 				radeon_clear_surface_reg(rdev, i);
    175 		}
    176 		/* enable surfaces */
    177 		WREG32(RADEON_SURFACE_CNTL, 0);
    178 	}
    179 }
    180 
    181 /*
    182  * GPU scratch register helper functions.
    183  */
    184 /**
    185  * radeon_scratch_init - Init scratch register driver information.
    186  *
    187  * @rdev: radeon_device pointer
    188  *
    189  * Init CP scratch register driver information (r1xx-r5xx)
    190  */
    191 void radeon_scratch_init(struct radeon_device *rdev)
    192 {
    193 	int i;
    194 
    195 	/* FIXME: check this out */
    196 	if (rdev->family < CHIP_R300) {
    197 		rdev->scratch.num_reg = 5;
    198 	} else {
    199 		rdev->scratch.num_reg = 7;
    200 	}
    201 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    202 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    203 		rdev->scratch.free[i] = true;
    204 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    205 	}
    206 }
    207 
    208 /**
    209  * radeon_scratch_get - Allocate a scratch register
    210  *
    211  * @rdev: radeon_device pointer
    212  * @reg: scratch register mmio offset
    213  *
    214  * Allocate a CP scratch register for use by the driver (all asics).
    215  * Returns 0 on success or -EINVAL on failure.
    216  */
    217 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
    218 {
    219 	int i;
    220 
    221 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    222 		if (rdev->scratch.free[i]) {
    223 			rdev->scratch.free[i] = false;
    224 			*reg = rdev->scratch.reg[i];
    225 			return 0;
    226 		}
    227 	}
    228 	return -EINVAL;
    229 }
    230 
    231 /**
    232  * radeon_scratch_free - Free a scratch register
    233  *
    234  * @rdev: radeon_device pointer
    235  * @reg: scratch register mmio offset
    236  *
    237  * Free a CP scratch register allocated for use by the driver (all asics)
    238  */
    239 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
    240 {
    241 	int i;
    242 
    243 	for (i = 0; i < rdev->scratch.num_reg; i++) {
    244 		if (rdev->scratch.reg[i] == reg) {
    245 			rdev->scratch.free[i] = true;
    246 			return;
    247 		}
    248 	}
    249 }
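
/*
 * Illustrative sketch of the scratch-register get/use/free pattern; the ring
 * test details below are hypothetical, not code from this file:
 *
 *	uint32_t scratch;
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		// ...emit a ring packet that writes a new value to 'scratch',
 *		// then poll RREG32(scratch) until it changes...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */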
    250 
    251 /*
    252  * GPU doorbell aperture helper functions.
    253  */
    254 /**
    255  * radeon_doorbell_init - Init doorbell driver information.
    256  *
    257  * @rdev: radeon_device pointer
    258  *
    259  * Init doorbell driver information (CIK)
    260  * Returns 0 on success, error on failure.
    261  */
    262 static int radeon_doorbell_init(struct radeon_device *rdev)
    263 {
    264 #ifdef __NetBSD__
    265 	int r;
    266 #endif
    267 
    268 	/* doorbell bar mapping */
    269 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
    270 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
    271 
    272 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
    273 	if (rdev->doorbell.num_doorbells == 0)
    274 		return -EINVAL;
    275 
    276 #ifdef __NetBSD__
    277 	/* XXX errno NetBSD->Linux */
    278 	rdev->doorbell.bst = rdev->pdev->pd_pa.pa_memt;
    279 	r = -bus_space_map(rdev->doorbell.bst, rdev->doorbell.base,
    280 	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)),
    281 	    0, &rdev->doorbell.bsh);
    282 	if (r)
    283 		return r;
    284 #else
    285 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
    286 	if (rdev->doorbell.ptr == NULL) {
    287 		return -ENOMEM;
    288 	}
    289 #endif
    290 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
    291 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
    292 
    293 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
    294 
    295 	return 0;
    296 }
    297 
    298 /**
    299  * radeon_doorbell_fini - Tear down doorbell driver information.
    300  *
    301  * @rdev: radeon_device pointer
    302  *
    303  * Tear down doorbell driver information (CIK)
    304  */
    305 static void radeon_doorbell_fini(struct radeon_device *rdev)
    306 {
    307 #ifdef __NetBSD__
    308 	bus_space_unmap(rdev->doorbell.bst, rdev->doorbell.bsh,
    309 	    (rdev->doorbell.num_doorbells * sizeof(uint32_t)));
    310 #else
    311 	iounmap(rdev->doorbell.ptr);
    312 	rdev->doorbell.ptr = NULL;
    313 #endif
    314 }
    315 
    316 /**
    317  * radeon_doorbell_get - Allocate a doorbell entry
    318  *
    319  * @rdev: radeon_device pointer
    320  * @doorbell: doorbell index
    321  *
    322  * Allocate a doorbell for use by the driver (all asics).
    323  * Returns 0 on success or -EINVAL on failure.
    324  */
    325 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
    326 {
    327 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
    328 	if (offset < rdev->doorbell.num_doorbells) {
    329 		__set_bit(offset, rdev->doorbell.used);
    330 		*doorbell = offset;
    331 		return 0;
    332 	} else {
    333 		return -EINVAL;
    334 	}
    335 }
    336 
    337 /**
    338  * radeon_doorbell_free - Free a doorbell entry
    339  *
    340  * @rdev: radeon_device pointer
    341  * @doorbell: doorbell index
    342  *
    343  * Free a doorbell allocated for use by the driver (all asics)
    344  */
    345 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
    346 {
    347 	if (doorbell < rdev->doorbell.num_doorbells)
    348 		__clear_bit(doorbell, rdev->doorbell.used);
    349 }
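
/*
 * Doorbell allocation follows the same get/free pattern as the scratch
 * registers above; a minimal sketch:
 *
 *	u32 db_index;
 *	if (radeon_doorbell_get(rdev, &db_index) == 0) {
 *		// hand db_index to whatever ring/queue needs a doorbell...
 *		radeon_doorbell_free(rdev, db_index);
 *	}
 */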
    350 
    351 /*
    352  * radeon_wb_*()
    353  * Writeback is the method by which the GPU updates special pages
    354  * in memory with the status of certain GPU events (fences, ring pointers,
    355  * etc.).
    356  */
    357 
    358 /**
    359  * radeon_wb_disable - Disable Writeback
    360  *
    361  * @rdev: radeon_device pointer
    362  *
    363  * Disables Writeback (all asics).  Used for suspend.
    364  */
    365 void radeon_wb_disable(struct radeon_device *rdev)
    366 {
    367 	rdev->wb.enabled = false;
    368 }
    369 
    370 /**
    371  * radeon_wb_fini - Disable Writeback and free memory
    372  *
    373  * @rdev: radeon_device pointer
    374  *
    375  * Disables Writeback and frees the Writeback memory (all asics).
    376  * Used at driver shutdown.
    377  */
    378 void radeon_wb_fini(struct radeon_device *rdev)
    379 {
    380 	radeon_wb_disable(rdev);
    381 	if (rdev->wb.wb_obj) {
    382 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
    383 			radeon_bo_kunmap(rdev->wb.wb_obj);
    384 			radeon_bo_unpin(rdev->wb.wb_obj);
    385 			radeon_bo_unreserve(rdev->wb.wb_obj);
    386 		}
    387 		radeon_bo_unref(&rdev->wb.wb_obj);
    388 		rdev->wb.wb = NULL;
    389 		rdev->wb.wb_obj = NULL;
    390 	}
    391 }
    392 
    393 /**
    394  * radeon_wb_init - Init Writeback driver info and allocate memory
    395  *
    396  * @rdev: radeon_device pointer
    397  *
    398  * Initializes Writeback and allocates the Writeback memory (all asics).
    399  * Used at driver startup.
    400  * Returns 0 on success or a negative error code on failure.
    401  */
    402 int radeon_wb_init(struct radeon_device *rdev)
    403 {
    404 	int r;
    405 
    406 	if (rdev->wb.wb_obj == NULL) {
    407 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
    408 				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
    409 		if (r) {
    410 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
    411 			return r;
    412 		}
    413 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
    414 		if (unlikely(r != 0)) {
    415 			radeon_wb_fini(rdev);
    416 			return r;
    417 		}
    418 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
    419 				&rdev->wb.gpu_addr);
    420 		if (r) {
    421 			radeon_bo_unreserve(rdev->wb.wb_obj);
    422 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
    423 			radeon_wb_fini(rdev);
    424 			return r;
    425 		}
    426 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)__UNVOLATILE(&rdev->wb.wb));
    427 		radeon_bo_unreserve(rdev->wb.wb_obj);
    428 		if (r) {
    429 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
    430 			radeon_wb_fini(rdev);
    431 			return r;
    432 		}
    433 	}
    434 
    435 	/* clear wb memory */
    436 	memset(__UNVOLATILE(rdev->wb.wb), 0, RADEON_GPU_PAGE_SIZE);
    437 	/* disable event_write fences */
    438 	rdev->wb.use_event = false;
    439 	/* disabled via module param */
    440 	if (radeon_no_wb == 1) {
    441 		rdev->wb.enabled = false;
    442 	} else {
    443 		if (rdev->flags & RADEON_IS_AGP) {
    444 			/* often unreliable on AGP */
    445 			rdev->wb.enabled = false;
    446 		} else if (rdev->family < CHIP_R300) {
    447 			/* often unreliable on pre-r300 */
    448 			rdev->wb.enabled = false;
    449 		} else {
    450 			rdev->wb.enabled = true;
    451 			/* event_write fences are only available on r600+ */
    452 			if (rdev->family >= CHIP_R600) {
    453 				rdev->wb.use_event = true;
    454 			}
    455 		}
    456 	}
    457 	/* always use writeback/events on NI, APUs */
    458 	if (rdev->family >= CHIP_PALM) {
    459 		rdev->wb.enabled = true;
    460 		rdev->wb.use_event = true;
    461 	}
    462 
    463 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
    464 
    465 	return 0;
    466 }
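
/*
 * Sketch of how a writeback slot is typically consumed once wb.enabled is
 * set: the GPU writes the value into the writeback page and the driver reads
 * it from rdev->wb.wb instead of doing an MMIO read.  The offset and register
 * names below are illustrative assumptions, not taken from this file.
 *
 *	if (rdev->wb.enabled)
 *		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
 *	else
 *		rptr = RREG32(CP_RB_RPTR);	// MMIO fallback
 */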
    467 
    468 /**
    469  * radeon_vram_location - try to find VRAM location
    470  * @rdev: radeon device structure holding all necessary information
    471  * @mc: memory controller structure holding memory information
    472  * @base: base address at which to put VRAM
    473  *
    474  * Function will try to place VRAM at the base address provided
    475  * as a parameter (which is so far either the PCI aperture address or,
    476  * for IGP, the TOM base address).
    477  *
    478  * If there is not enough space to fit the non-visible VRAM in the 32-bit
    479  * address space, then we limit the VRAM size to the aperture.
    480  *
    481  * If we are using AGP and the AGP aperture doesn't allow us to have
    482  * room for all the VRAM, then we restrict the VRAM to the PCI aperture
    483  * size and print a warning.
    484  *
    485  * This function never fails; the worst case is limiting VRAM.
    486  *
    487  * Note: GTT start, end and size should be initialized before calling this
    488  * function on AGP platforms.
    489  *
    490  * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
    491  * this shouldn't be a problem as we are using the PCI aperture as a reference.
    492  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
    493  * not IGP.
    494  *
    495  * Note: we use mc_vram_size because on some boards we need to program the MC
    496  * to cover the whole aperture even if the VRAM size is smaller than the
    497  * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
    498  *
    499  * Note: when limiting VRAM it's safe to overwrite real_vram_size because
    500  * we are not in the case where real_vram_size is smaller than mc_vram_size
    501  * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
    502  * related Ubuntu ones).
    503  *
    504  * Note: the IGP TOM address should be the same as the aperture address; we
    505  * don't explicitly check for that, though.
    506  *
    507  * FIXME: when reducing VRAM size align new size on power of 2.
    508  */
    509 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
    510 {
    511 	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
    512 
    513 	mc->vram_start = base;
    514 	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
    515 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    516 		mc->real_vram_size = mc->aper_size;
    517 		mc->mc_vram_size = mc->aper_size;
    518 	}
    519 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    520 	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
    521 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
    522 		mc->real_vram_size = mc->aper_size;
    523 		mc->mc_vram_size = mc->aper_size;
    524 	}
    525 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    526 	if (limit && limit < mc->real_vram_size)
    527 		mc->real_vram_size = limit;
    528 	dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64" (%"PRIu64"M used)\n",
    529 			mc->mc_vram_size >> 20, mc->vram_start,
    530 			mc->vram_end, mc->real_vram_size >> 20);
    531 }
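
/*
 * Worked example with hypothetical numbers: base = 0, mc_vram_size = 1GB,
 * aper_size = 256MB and a 40-bit mc_mask give vram_start = 0x0 and
 * vram_end = 0x3FFFFFFF; the sizes are only clamped down to the 256MB
 * aperture if the VRAM would overflow the MC address mask or collide with
 * the AGP GTT range.
 */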
    532 
    533 /**
    534  * radeon_gtt_location - try to find GTT location
    535  * @rdev: radeon device structure holding all necessary information
    536  * @mc: memory controller structure holding memory information
    537  *
    538  * Function will try to place GTT before or after VRAM.
    539  *
    540  * If the GTT size is bigger than the space left, then we adjust the GTT size.
    541  * Thus this function never fails.
    542  *
    543  * FIXME: when reducing GTT size align new size on power of 2.
    544  */
    545 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
    546 {
    547 	u64 size_af, size_bf;
    548 
    549 	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
    550 	size_bf = mc->vram_start & ~mc->gtt_base_align;
    551 	if (size_bf > size_af) {
    552 		if (mc->gtt_size > size_bf) {
    553 			dev_warn(rdev->dev, "limiting GTT\n");
    554 			mc->gtt_size = size_bf;
    555 		}
    556 		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
    557 	} else {
    558 		if (mc->gtt_size > size_af) {
    559 			dev_warn(rdev->dev, "limiting GTT\n");
    560 			mc->gtt_size = size_af;
    561 		}
    562 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
    563 	}
    564 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
    565 	dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64"\n",
    566 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
    567 }
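
/*
 * Worked example continuing the numbers above (gtt_base_align assumed 0):
 * with vram_start = 0x0 and vram_end = 0x3FFFFFFF, the space before VRAM
 * (size_bf) is 0 and the space after it (size_af) is much larger, so a
 * 512MB GTT lands at gtt_start = 0x40000000 with gtt_end = 0x5FFFFFFF.
 */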
    568 
    569 /*
    570  * GPU helper functions.
    571  */
    572 /**
    573  * radeon_card_posted - check if the hw has already been initialized
    574  *
    575  * @rdev: radeon_device pointer
    576  *
    577  * Check if the asic has been initialized (all asics).
    578  * Used at driver startup.
    579  * Returns true if initialized or false if not.
    580  */
    581 bool radeon_card_posted(struct radeon_device *rdev)
    582 {
    583 	uint32_t reg;
    584 
    585 #ifndef __NetBSD__		/* XXX radeon efi */
    586 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
    587 	if (efi_enabled(EFI_BOOT) &&
    588 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
    589 	    (rdev->family < CHIP_R600))
    590 		return false;
    591 #endif
    592 
    593 	if (ASIC_IS_NODCE(rdev))
    594 		goto check_memsize;
    595 
    596 	/* first check CRTCs */
    597 	if (ASIC_IS_DCE4(rdev)) {
    598 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
    599 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
    600 			if (rdev->num_crtc >= 4) {
    601 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
    602 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
    603 			}
    604 			if (rdev->num_crtc >= 6) {
    605 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
    606 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
    607 			}
    608 		if (reg & EVERGREEN_CRTC_MASTER_EN)
    609 			return true;
    610 	} else if (ASIC_IS_AVIVO(rdev)) {
    611 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
    612 		      RREG32(AVIVO_D2CRTC_CONTROL);
    613 		if (reg & AVIVO_CRTC_EN) {
    614 			return true;
    615 		}
    616 	} else {
    617 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
    618 		      RREG32(RADEON_CRTC2_GEN_CNTL);
    619 		if (reg & RADEON_CRTC_EN) {
    620 			return true;
    621 		}
    622 	}
    623 
    624 check_memsize:
    625 	/* then check MEM_SIZE, in case the crtcs are off */
    626 	if (rdev->family >= CHIP_R600)
    627 		reg = RREG32(R600_CONFIG_MEMSIZE);
    628 	else
    629 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
    630 
    631 	if (reg)
    632 		return true;
    633 
    634 	return false;
    635 
    636 }
    637 
    638 /**
    639  * radeon_update_bandwidth_info - update display bandwidth params
    640  *
    641  * @rdev: radeon_device pointer
    642  *
    643  * Used when sclk/mclk are switched or display modes are set.
    644  * params are used to calculate display watermarks (all asics)
    645  */
    646 void radeon_update_bandwidth_info(struct radeon_device *rdev)
    647 {
    648 	fixed20_12 a;
    649 	u32 sclk = rdev->pm.current_sclk;
    650 	u32 mclk = rdev->pm.current_mclk;
    651 
    652 	/* sclk/mclk in Mhz */
    653 	a.full = dfixed_const(100);
    654 	rdev->pm.sclk.full = dfixed_const(sclk);
    655 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
    656 	rdev->pm.mclk.full = dfixed_const(mclk);
    657 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
    658 
    659 	if (rdev->flags & RADEON_IS_IGP) {
    660 		a.full = dfixed_const(16);
    661 		/* core_bandwidth = sclk(Mhz) * 16 */
    662 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
    663 	}
    664 }
    665 
    666 /**
    667  * radeon_boot_test_post_card - check and possibly initialize the hw
    668  *
    669  * @rdev: radeon_device pointer
    670  *
    671  * Check if the asic is initialized and if not, attempt to initialize
    672  * it (all asics).
    673  * Returns true if initialized or false if not.
    674  */
    675 bool radeon_boot_test_post_card(struct radeon_device *rdev)
    676 {
    677 	if (radeon_card_posted(rdev))
    678 		return true;
    679 
    680 	if (rdev->bios) {
    681 		DRM_INFO("GPU not posted. posting now...\n");
    682 		if (rdev->is_atom_bios)
    683 			atom_asic_init(rdev->mode_info.atom_context);
    684 		else
    685 			radeon_combios_asic_init(rdev->ddev);
    686 		return true;
    687 	} else {
    688 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
    689 		return false;
    690 	}
    691 }
    692 
    693 /**
    694  * radeon_dummy_page_init - init dummy page used by the driver
    695  *
    696  * @rdev: radeon_device pointer
    697  *
    698  * Allocate the dummy page used by the driver (all asics).
    699  * This dummy page is used by the driver as a filler for GART entries
    700  * when pages are taken out of the GART.
    701  * Returns 0 on success, -ENOMEM on failure.
    702  */
    703 int radeon_dummy_page_init(struct radeon_device *rdev)
    704 {
    705 #ifdef __NetBSD__
    706 	int rsegs;
    707 	int error;
    708 
    709 	/* XXX Can this be called more than once??  */
    710 	if (rdev->dummy_page.rdp_map != NULL)
    711 		return 0;
    712 
    713 	error = bus_dmamem_alloc(rdev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
    714 	    &rdev->dummy_page.rdp_seg, 1, &rsegs, BUS_DMA_WAITOK);
    715 	if (error)
    716 		goto fail0;
    717 	KASSERT(rsegs == 1);
    718 	error = bus_dmamap_create(rdev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
    719 	    BUS_DMA_WAITOK, &rdev->dummy_page.rdp_map);
    720 	if (error)
    721 		goto fail1;
    722 	error = bus_dmamap_load_raw(rdev->ddev->dmat, rdev->dummy_page.rdp_map,
    723 	    &rdev->dummy_page.rdp_seg, 1, PAGE_SIZE, BUS_DMA_WAITOK);
    724 	if (error)
    725 		goto fail2;
    726 
    727 	/* Success!  */
    728 	rdev->dummy_page.addr = rdev->dummy_page.rdp_map->dm_segs[0].ds_addr;
    729 	return 0;
    730 
    731 fail3: __unused
    732 	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    733 fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    734 fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
    735 fail0:	KASSERT(error);
    736 	rdev->dummy_page.rdp_map = NULL;
    737 	/* XXX errno NetBSD->Linux */
    738 	return -error;
    739 #else
    740 	if (rdev->dummy_page.page)
    741 		return 0;
    742 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
    743 	if (rdev->dummy_page.page == NULL)
    744 		return -ENOMEM;
    745 	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
    746 					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    747 	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
    748 		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
    749 		__free_page(rdev->dummy_page.page);
    750 		rdev->dummy_page.page = NULL;
    751 		return -ENOMEM;
    752 	}
    753 	return 0;
    754 #endif
    755 }
    756 
    757 /**
    758  * radeon_dummy_page_fini - free dummy page used by the driver
    759  *
    760  * @rdev: radeon_device pointer
    761  *
    762  * Frees the dummy page used by the driver (all asics).
    763  */
    764 void radeon_dummy_page_fini(struct radeon_device *rdev)
    765 {
    766 #ifdef __NetBSD__
    767 
    768 	if (rdev->dummy_page.rdp_map == NULL)
    769 		return;
    770 	bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    771 	bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
    772 	bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
    773 	rdev->dummy_page.rdp_map = NULL;
    774 #else
    775 	if (rdev->dummy_page.page == NULL)
    776 		return;
    777 	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
    778 			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    779 	__free_page(rdev->dummy_page.page);
    780 	rdev->dummy_page.page = NULL;
    781 #endif
    782 }
    783 
    784 
    785 /* ATOM accessor methods */
    786 /*
    787  * ATOM is an interpreted byte code stored in tables in the vbios.  The
    788  * driver registers callbacks to access registers and the interpreter
    789  * in the driver parses the tables and executes them to perform specific
    790  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
    791  * atombios.h, and atom.c
    792  */
    793 
    794 /**
    795  * cail_pll_read - read PLL register
    796  *
    797  * @info: atom card_info pointer
    798  * @reg: PLL register offset
    799  *
    800  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    801  * Returns the value of the PLL register.
    802  */
    803 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
    804 {
    805 	struct radeon_device *rdev = info->dev->dev_private;
    806 	uint32_t r;
    807 
    808 	r = rdev->pll_rreg(rdev, reg);
    809 	return r;
    810 }
    811 
    812 /**
    813  * cail_pll_write - write PLL register
    814  *
    815  * @info: atom card_info pointer
    816  * @reg: PLL register offset
    817  * @val: value to write to the pll register
    818  *
    819  * Provides a PLL register accessor for the atom interpreter (r4xx+).
    820  */
    821 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
    822 {
    823 	struct radeon_device *rdev = info->dev->dev_private;
    824 
    825 	rdev->pll_wreg(rdev, reg, val);
    826 }
    827 
    828 /**
    829  * cail_mc_read - read MC (Memory Controller) register
    830  *
    831  * @info: atom card_info pointer
    832  * @reg: MC register offset
    833  *
    834  * Provides an MC register accessor for the atom interpreter (r4xx+).
    835  * Returns the value of the MC register.
    836  */
    837 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
    838 {
    839 	struct radeon_device *rdev = info->dev->dev_private;
    840 	uint32_t r;
    841 
    842 	r = rdev->mc_rreg(rdev, reg);
    843 	return r;
    844 }
    845 
    846 /**
    847  * cail_mc_write - write MC (Memory Controller) register
    848  *
    849  * @info: atom card_info pointer
    850  * @reg: MC register offset
    851  * @val: value to write to the MC register
    852  *
    853  * Provides an MC register accessor for the atom interpreter (r4xx+).
    854  */
    855 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
    856 {
    857 	struct radeon_device *rdev = info->dev->dev_private;
    858 
    859 	rdev->mc_wreg(rdev, reg, val);
    860 }
    861 
    862 /**
    863  * cail_reg_write - write MMIO register
    864  *
    865  * @info: atom card_info pointer
    866  * @reg: MMIO register offset
    867  * @val: value to write to the MMIO register
    868  *
    869  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
    870  */
    871 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
    872 {
    873 	struct radeon_device *rdev = info->dev->dev_private;
    874 
    875 	WREG32(reg*4, val);
    876 }
    877 
    878 /**
    879  * cail_reg_read - read MMIO register
    880  *
    881  * @info: atom card_info pointer
    882  * @reg: MMIO register offset
    883  *
    884  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
    885  * Returns the value of the MMIO register.
    886  */
    887 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
    888 {
    889 	struct radeon_device *rdev = info->dev->dev_private;
    890 	uint32_t r;
    891 
    892 	r = RREG32(reg*4);
    893 	return r;
    894 }
    895 
    896 /**
    897  * cail_ioreg_write - write IO register
    898  *
    899  * @info: atom card_info pointer
    900  * @reg: IO register offset
    901  * @val: value to write to the IO register
    902  *
    903  * Provides an IO register accessor for the atom interpreter (r4xx+).
    904  */
    905 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
    906 {
    907 	struct radeon_device *rdev = info->dev->dev_private;
    908 
    909 	WREG32_IO(reg*4, val);
    910 }
    911 
    912 /**
    913  * cail_ioreg_read - read IO register
    914  *
    915  * @info: atom card_info pointer
    916  * @reg: IO register offset
    917  *
    918  * Provides an IO register accessor for the atom interpreter (r4xx+).
    919  * Returns the value of the IO register.
    920  */
    921 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
    922 {
    923 	struct radeon_device *rdev = info->dev->dev_private;
    924 	uint32_t r;
    925 
    926 	r = RREG32_IO(reg*4);
    927 	return r;
    928 }
    929 
    930 /**
    931  * radeon_atombios_init - init the driver info and callbacks for atombios
    932  *
    933  * @rdev: radeon_device pointer
    934  *
    935  * Initializes the driver info and register access callbacks for the
    936  * ATOM interpreter (r4xx+).
    937  * Returns 0 on success, -ENOMEM on failure.
    938  * Called at driver startup.
    939  */
    940 int radeon_atombios_init(struct radeon_device *rdev)
    941 {
    942 	struct card_info *atom_card_info =
    943 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
    944 
    945 	if (!atom_card_info)
    946 		return -ENOMEM;
    947 
    948 	rdev->mode_info.atom_card_info = atom_card_info;
    949 	atom_card_info->dev = rdev->ddev;
    950 	atom_card_info->reg_read = cail_reg_read;
    951 	atom_card_info->reg_write = cail_reg_write;
    952 	/* needed for iio ops */
    953 #ifdef __NetBSD__
    954 	if (rdev->rio_mem_size)
    955 #else
    956 	if (rdev->rio_mem)
    957 #endif
    958 	{
    959 		atom_card_info->ioreg_read = cail_ioreg_read;
    960 		atom_card_info->ioreg_write = cail_ioreg_write;
    961 	} else {
    962 		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
    963 		atom_card_info->ioreg_read = cail_reg_read;
    964 		atom_card_info->ioreg_write = cail_reg_write;
    965 	}
    966 	atom_card_info->mc_read = cail_mc_read;
    967 	atom_card_info->mc_write = cail_mc_write;
    968 	atom_card_info->pll_read = cail_pll_read;
    969 	atom_card_info->pll_write = cail_pll_write;
    970 
    971 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
    972 	if (!rdev->mode_info.atom_context) {
    973 		radeon_atombios_fini(rdev);
    974 		return -ENOMEM;
    975 	}
    976 
    977 #ifdef __NetBSD__
    978 	linux_mutex_init(&rdev->mode_info.atom_context->mutex);
    979 #else
    980 	mutex_init(&rdev->mode_info.atom_context->mutex);
    981 #endif
    982 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
    983 	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
    984 	return 0;
    985 }
    986 
    987 /**
    988  * radeon_atombios_fini - free the driver info and callbacks for atombios
    989  *
    990  * @rdev: radeon_device pointer
    991  *
    992  * Frees the driver info and register access callbacks for the ATOM
    993  * interpreter (r4xx+).
    994  * Called at driver shutdown.
    995  */
    996 void radeon_atombios_fini(struct radeon_device *rdev)
    997 {
    998 	if (rdev->mode_info.atom_context) {
    999 #ifdef __NetBSD__
   1000 		linux_mutex_destroy(&rdev->mode_info.atom_context->mutex);
   1001 #else
   1002 		mutex_destroy(&rdev->mode_info.atom_context->mutex);
   1003 #endif
   1004 		kfree(rdev->mode_info.atom_context->scratch);
   1005 	}
   1006 	kfree(rdev->mode_info.atom_context);
   1007 	rdev->mode_info.atom_context = NULL;
   1008 	kfree(rdev->mode_info.atom_card_info);
   1009 	rdev->mode_info.atom_card_info = NULL;
   1010 }
   1011 
   1012 /* COMBIOS */
   1013 /*
   1014  * COMBIOS is the bios format prior to ATOM. It provides
   1015  * command tables similar to ATOM, but doesn't have a unified
   1016  * parser.  See radeon_combios.c
   1017  */
   1018 
   1019 /**
   1020  * radeon_combios_init - init the driver info for combios
   1021  *
   1022  * @rdev: radeon_device pointer
   1023  *
   1024  * Initializes the driver info for combios (r1xx-r3xx).
   1025  * Returns 0 on success.
   1026  * Called at driver startup.
   1027  */
   1028 int radeon_combios_init(struct radeon_device *rdev)
   1029 {
   1030 	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
   1031 	return 0;
   1032 }
   1033 
   1034 /**
   1035  * radeon_combios_fini - free the driver info for combios
   1036  *
   1037  * @rdev: radeon_device pointer
   1038  *
   1039  * Frees the driver info for combios (r1xx-r3xx).
   1040  * Called at driver shutdown.
   1041  */
   1042 void radeon_combios_fini(struct radeon_device *rdev)
   1043 {
   1044 }
   1045 
   1046 #ifndef __NetBSD__		/* XXX radeon vga */
   1047 /* if we get transitioned to only one device, take VGA back */
   1048 /**
   1049  * radeon_vga_set_decode - enable/disable vga decode
   1050  *
   1051  * @cookie: radeon_device pointer
   1052  * @state: enable/disable vga decode
   1053  *
   1054  * Enable/disable vga decode (all asics).
   1055  * Returns VGA resource flags.
   1056  */
   1057 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
   1058 {
   1059 	struct radeon_device *rdev = cookie;
   1060 	radeon_vga_set_state(rdev, state);
   1061 	if (state)
   1062 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
   1063 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1064 	else
   1065 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
   1066 }
   1067 #endif
   1068 
   1069 /**
   1070  * radeon_check_pot_argument - check that argument is a power of two
   1071  *
   1072  * @arg: value to check
   1073  *
   1074  * Validates that a certain argument is a power of two (all asics).
   1075  * Returns true if argument is valid.
   1076  */
   1077 static bool radeon_check_pot_argument(int arg)
   1078 {
   1079 	return (arg & (arg - 1)) == 0;
   1080 }
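
/*
 * The bit trick above works because clearing the lowest set bit of a power
 * of two leaves zero: 256 & 255 == 0, whereas 384 & 383 == 256 != 0.  Note
 * that 0 also passes the test, which is acceptable for radeon_vram_limit
 * since a value of 0 there means "no limit".
 */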
   1081 
   1082 /**
   1083  * radeon_check_arguments - validate module params
   1084  *
   1085  * @rdev: radeon_device pointer
   1086  *
   1087  * Validates certain module parameters and updates
   1088  * the associated values used by the driver (all asics).
   1089  */
   1090 static void radeon_check_arguments(struct radeon_device *rdev)
   1091 {
   1092 	/* vramlimit must be a power of two */
   1093 	if (!radeon_check_pot_argument(radeon_vram_limit)) {
   1094 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
   1095 				radeon_vram_limit);
   1096 		radeon_vram_limit = 0;
   1097 	}
   1098 
   1099 	if (radeon_gart_size == -1) {
   1100 		/* default to a larger gart size on newer asics */
   1101 		if (rdev->family >= CHIP_RV770)
   1102 			radeon_gart_size = 1024;
   1103 		else
   1104 			radeon_gart_size = 512;
   1105 	}
   1106 	/* gtt size must be a power of two and greater than or equal to 32M */
   1107 	if (radeon_gart_size < 32) {
   1108 		dev_warn(rdev->dev, "gart size (%d) too small\n",
   1109 				radeon_gart_size);
   1110 		if (rdev->family >= CHIP_RV770)
   1111 			radeon_gart_size = 1024;
   1112 		else
   1113 			radeon_gart_size = 512;
   1114 	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
   1115 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
   1116 				radeon_gart_size);
   1117 		if (rdev->family >= CHIP_RV770)
   1118 			radeon_gart_size = 1024;
   1119 		else
   1120 			radeon_gart_size = 512;
   1121 	}
   1122 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
   1123 
   1124 	/* AGP mode can only be -1, 1, 2, 4, 8 */
   1125 	switch (radeon_agpmode) {
   1126 	case -1:
   1127 	case 0:
   1128 	case 1:
   1129 	case 2:
   1130 	case 4:
   1131 	case 8:
   1132 		break;
   1133 	default:
   1134 		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
   1135 				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
   1136 		radeon_agpmode = 0;
   1137 		break;
   1138 	}
   1139 }
   1140 
   1141 #ifndef __NetBSD__		/* XXX radeon vga */
   1142 /**
   1143  * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
   1144  * needed for waking up.
   1145  *
   1146  * @pdev: pci dev pointer
   1147  */
   1148 static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
   1149 {
   1150 
   1151 	/* 6600m in a macbook pro */
   1152 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
   1153 	    pdev->subsystem_device == 0x00e2) {
   1154 		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
   1155 		return true;
   1156 	}
   1157 
   1158 	return false;
   1159 }
   1160 
   1161 /**
   1162  * radeon_switcheroo_set_state - set switcheroo state
   1163  *
   1164  * @pdev: pci dev pointer
   1165  * @state: vga switcheroo state
   1166  *
   1167  * Callback for the switcheroo driver.  Suspends or resumes the
   1168  * asic before or after it is powered up using ACPI methods.
   1169  */
   1170 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
   1171 {
   1172 	struct drm_device *dev = pci_get_drvdata(pdev);
   1173 
   1174 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
   1175 		return;
   1176 
   1177 	if (state == VGA_SWITCHEROO_ON) {
   1178 		unsigned d3_delay = dev->pdev->d3_delay;
   1179 
   1180 		printk(KERN_INFO "radeon: switched on\n");
   1181 		/* don't suspend or resume card normally */
   1182 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1183 
   1184 		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
   1185 			dev->pdev->d3_delay = 20;
   1186 
   1187 		radeon_resume_kms(dev, true, true);
   1188 
   1189 		dev->pdev->d3_delay = d3_delay;
   1190 
   1191 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
   1192 		drm_kms_helper_poll_enable(dev);
   1193 	} else {
   1194 		printk(KERN_INFO "radeon: switched off\n");
   1195 		drm_kms_helper_poll_disable(dev);
   1196 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
   1197 		radeon_suspend_kms(dev, true, true);
   1198 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
   1199 	}
   1200 }
   1201 
   1202 /**
   1203  * radeon_switcheroo_can_switch - see if switcheroo state can change
   1204  *
   1205  * @pdev: pci dev pointer
   1206  *
   1207  * Callback for the switcheroo driver.  Checks if the switcheroo
   1208  * state can be changed.
   1209  * Returns true if the state can be changed, false if not.
   1210  */
   1211 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
   1212 {
   1213 	struct drm_device *dev = pci_get_drvdata(pdev);
   1214 	bool can_switch;
   1215 
   1216 	spin_lock(&dev->count_lock);
   1217 	can_switch = (dev->open_count == 0);
   1218 	spin_unlock(&dev->count_lock);
   1219 	return can_switch;
   1220 }
   1221 
   1222 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
   1223 	.set_gpu_state = radeon_switcheroo_set_state,
   1224 	.reprobe = NULL,
   1225 	.can_switch = radeon_switcheroo_can_switch,
   1226 };
   1227 #endif
   1228 
   1229 /**
   1230  * radeon_device_init - initialize the driver
   1231  *
   1232  * @rdev: radeon_device pointer
   1233  * @ddev: drm dev pointer
   1234  * @pdev: pci dev pointer
   1235  * @flags: driver flags
   1236  *
   1237  * Initializes the driver info and hw (all asics).
   1238  * Returns 0 for success or an error on failure.
   1239  * Called at driver startup.
   1240  */
   1241 int radeon_device_init(struct radeon_device *rdev,
   1242 		       struct drm_device *ddev,
   1243 		       struct pci_dev *pdev,
   1244 		       uint32_t flags)
   1245 {
   1246 	int r, i;
   1247 	int dma_bits;
   1248 #ifndef __NetBSD__
   1249 	bool runtime = false;
   1250 #endif
   1251 
   1252 	rdev->shutdown = false;
   1253 	rdev->dev = ddev->dev;
   1254 	rdev->ddev = ddev;
   1255 	rdev->pdev = pdev;
   1256 	rdev->flags = flags;
   1257 	rdev->family = flags & RADEON_FAMILY_MASK;
   1258 	rdev->is_atom_bios = false;
   1259 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
   1260 	rdev->mc.gtt_size = 512 * 1024 * 1024;
   1261 	rdev->accel_working = false;
   1262 	/* set up ring ids */
   1263 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1264 		rdev->ring[i].idx = i;
   1265 	}
   1266 
   1267 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
   1268 		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
   1269 		pdev->subsystem_vendor, pdev->subsystem_device);
   1270 
   1271 	/* mutex initialization is all done here so we
   1272 	 * can call these functions again without locking issues */
   1273 #ifdef __NetBSD__
   1274 	linux_mutex_init(&rdev->ring_lock);
   1275 	linux_mutex_init(&rdev->dc_hw_i2c_mutex);
   1276 #else
   1277 	mutex_init(&rdev->ring_lock);
   1278 	mutex_init(&rdev->dc_hw_i2c_mutex);
   1279 #endif
   1280 	atomic_set(&rdev->ih.lock, 0);
   1281 #ifdef __NetBSD__
   1282 	linux_mutex_init(&rdev->gem.mutex);
   1283 	linux_mutex_init(&rdev->pm.mutex);
   1284 	linux_mutex_init(&rdev->gpu_clock_mutex);
   1285 	linux_mutex_init(&rdev->srbm_mutex);
   1286 #else
   1287 	mutex_init(&rdev->gem.mutex);
   1288 	mutex_init(&rdev->pm.mutex);
   1289 	mutex_init(&rdev->gpu_clock_mutex);
   1290 	mutex_init(&rdev->srbm_mutex);
   1291 #endif
   1292 	init_rwsem(&rdev->pm.mclk_lock);
   1293 	init_rwsem(&rdev->exclusive_lock);
   1294 #ifdef __NetBSD__
   1295 	spin_lock_init(&rdev->irq.vblank_lock);
   1296 	DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
   1297 #else
   1298 	init_waitqueue_head(&rdev->irq.vblank_queue);
   1299 #endif
   1300 	r = radeon_gem_init(rdev);
   1301 	if (r)
   1302 		return r;
   1303 
   1304 	/* Adjust VM size here.
   1305 	 * Currently set to 4GB ((1 << 20) 4k pages).
   1306 	 * Max GPUVM size for cayman and SI is 40 bits.
   1307 	 */
   1308 	rdev->vm_manager.max_pfn = 1 << 20;
   1309 
   1310 	/* Set asic functions */
   1311 	r = radeon_asic_init(rdev);
   1312 	if (r)
   1313 		return r;
   1314 	radeon_check_arguments(rdev);
   1315 
   1316 	/* all of the newer IGP chips have an internal gart.
   1317 	 * However, some rs4xx report as AGP, so remove that here.
   1318 	 */
   1319 	if ((rdev->family >= CHIP_RS400) &&
   1320 	    (rdev->flags & RADEON_IS_IGP)) {
   1321 		rdev->flags &= ~RADEON_IS_AGP;
   1322 	}
   1323 
   1324 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
   1325 		radeon_agp_disable(rdev);
   1326 	}
   1327 
   1328 	/* Set the internal MC address mask
   1329 	 * This is the max address of the GPU's
   1330 	 * internal address space.
   1331 	 */
   1332 	if (rdev->family >= CHIP_CAYMAN)
   1333 		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
   1334 	else if (rdev->family >= CHIP_CEDAR)
   1335 		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
   1336 	else
   1337 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
   1338 
   1339 	/* set DMA mask + need_dma32 flags.
   1340 	 * PCIE - can handle 40-bits.
   1341 	 * IGP - can handle 40-bits
   1342 	 * AGP - generally dma32 is safest
   1343 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
   1344 	 */
   1345 	rdev->need_dma32 = false;
   1346 	if (rdev->flags & RADEON_IS_AGP)
   1347 		rdev->need_dma32 = true;
   1348 	if ((rdev->flags & RADEON_IS_PCI) &&
   1349 	    (rdev->family <= CHIP_RS740))
   1350 		rdev->need_dma32 = true;
   1351 
   1352 	dma_bits = rdev->need_dma32 ? 32 : 40;
   1353 #ifdef __NetBSD__
   1354 	r = drm_limit_dma_space(rdev->ddev, 0, __BITS(dma_bits - 1, 0));
   1355 	if (r)
   1356 		DRM_ERROR("No suitable DMA available.\n");
   1357 #else
   1358 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
   1359 	if (r) {
   1360 		rdev->need_dma32 = true;
   1361 		dma_bits = 32;
   1362 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
   1363 	}
   1364 	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
   1365 	if (r) {
   1366 		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
   1367 		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
   1368 	}
   1369 #endif
   1370 
   1371 	/* Registers mapping */
   1372 	/* TODO: block userspace mapping of io register */
   1373 	/* XXX Destroy these locks on detach...  */
   1374 	spin_lock_init(&rdev->mmio_idx_lock);
   1375 	spin_lock_init(&rdev->smc_idx_lock);
   1376 	spin_lock_init(&rdev->pll_idx_lock);
   1377 	spin_lock_init(&rdev->mc_idx_lock);
   1378 	spin_lock_init(&rdev->pcie_idx_lock);
   1379 	spin_lock_init(&rdev->pciep_idx_lock);
   1380 	spin_lock_init(&rdev->pif_idx_lock);
   1381 	spin_lock_init(&rdev->cg_idx_lock);
   1382 	spin_lock_init(&rdev->uvd_idx_lock);
   1383 	spin_lock_init(&rdev->rcu_idx_lock);
   1384 	spin_lock_init(&rdev->didt_idx_lock);
   1385 	spin_lock_init(&rdev->end_idx_lock);
   1386 #ifdef __NetBSD__
   1387     {
   1388 	pcireg_t bar;
   1389 
   1390 	if (rdev->family >= CHIP_BONAIRE)
   1391 		bar = 5;
   1392 	else
   1393 		bar = 2;
   1394 	if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(bar),
   1395 		pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
   1396 		    rdev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
   1397 		0,
   1398 		&rdev->rmmio_bst, &rdev->rmmio_bsh,
   1399 		&rdev->rmmio_addr, &rdev->rmmio_size))
   1400 		return -EIO;
   1401     }
   1402 	DRM_INFO("register mmio base: 0x%"PRIxMAX"\n",
   1403 	    (uintmax_t)rdev->rmmio_addr);
   1404 	DRM_INFO("register mmio size: %"PRIuMAX"\n",
   1405 	    (uintmax_t)rdev->rmmio_size);
   1406 #else
   1407 	if (rdev->family >= CHIP_BONAIRE) {
   1408 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
   1409 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
   1410 	} else {
   1411 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
   1412 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
   1413 	}
   1414 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
   1415 	if (rdev->rmmio == NULL) {
   1416 		return -ENOMEM;
   1417 	}
   1418 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
   1419 	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
   1420 #endif
   1421 
   1422 	/* doorbell bar mapping */
   1423 	if (rdev->family >= CHIP_BONAIRE)
   1424 		radeon_doorbell_init(rdev);
   1425 
   1426 	/* io port mapping */
   1427 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
   1428 #ifdef __NetBSD__
   1429 		if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(i),
   1430 			PCI_MAPREG_TYPE_IO, 0,
   1431 			&rdev->rio_mem_bst, &rdev->rio_mem_bsh,
   1432 			NULL, &rdev->rio_mem_size))
   1433 			continue;
   1434 		break;
   1435 #else
   1436 		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
   1437 			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
   1438 			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
   1439 			break;
   1440 		}
   1441 #endif
   1442 	}
   1443 #ifdef __NetBSD__
   1444 	if (i == DEVICE_COUNT_RESOURCE)
   1445 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1446 #else
   1447 	if (rdev->rio_mem == NULL)
   1448 		DRM_ERROR("Unable to find PCI I/O BAR\n");
   1449 #endif
   1450 
   1451 #ifndef __NetBSD__		/* XXX radeon vga */
   1452 	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
   1453 	/* this will fail for cards that aren't VGA class devices, just
   1454 	 * ignore it */
   1455 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
   1456 
   1457 	if (rdev->flags & RADEON_IS_PX)
   1458 		runtime = true;
   1459 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
   1460 	if (runtime)
   1461 		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
   1462 #endif
   1463 
   1464 	r = radeon_init(rdev);
   1465 	if (r)
   1466 		return r;
   1467 
   1468 	r = radeon_ib_ring_tests(rdev);
   1469 	if (r)
   1470 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1471 
   1472 	r = radeon_gem_debugfs_init(rdev);
   1473 	if (r) {
   1474 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
   1475 	}
   1476 
   1477 	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
   1478 		/* Acceleration not working on AGP card; try again
   1479 		 * with fallback to PCI or PCIE GART
   1480 		 */
   1481 		radeon_asic_reset(rdev);
   1482 		radeon_fini(rdev);
   1483 		radeon_agp_disable(rdev);
   1484 		r = radeon_init(rdev);
   1485 		if (r)
   1486 			return r;
   1487 	}
   1488 
   1489 	if ((radeon_testing & 1)) {
   1490 		if (rdev->accel_working)
   1491 			radeon_test_moves(rdev);
   1492 		else
   1493 			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
   1494 	}
   1495 	if ((radeon_testing & 2)) {
   1496 		if (rdev->accel_working)
   1497 			radeon_test_syncing(rdev);
   1498 		else
   1499 			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
   1500 	}
   1501 	if (radeon_benchmarking) {
   1502 		if (rdev->accel_working)
   1503 			radeon_benchmark(rdev, radeon_benchmarking);
   1504 		else
   1505 			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
   1506 	}
   1507 	return 0;
   1508 }
   1509 
   1510 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
   1511 
   1512 /**
   1513  * radeon_device_fini - tear down the driver
   1514  *
   1515  * @rdev: radeon_device pointer
   1516  *
   1517  * Tear down the driver info (all asics).
   1518  * Called at driver shutdown.
   1519  */
   1520 void radeon_device_fini(struct radeon_device *rdev)
   1521 {
   1522 	DRM_INFO("radeon: finishing device.\n");
   1523 	rdev->shutdown = true;
   1524 	/* evict vram memory */
   1525 	radeon_bo_evict_vram(rdev);
   1526 	radeon_fini(rdev);
   1527 #ifndef __NetBSD__
   1528 	vga_switcheroo_unregister_client(rdev->pdev);
   1529 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
   1530 #endif
   1531 #ifdef __NetBSD__
   1532 	if (rdev->rio_mem_size)
   1533 		bus_space_unmap(rdev->rio_mem_bst, rdev->rio_mem_bsh,
   1534 		    rdev->rio_mem_size);
   1535 	rdev->rio_mem_size = 0;
   1536 	bus_space_unmap(rdev->rmmio_bst, rdev->rmmio_bsh, rdev->rmmio_size);
   1537 #else
   1538 	if (rdev->rio_mem)
   1539 		pci_iounmap(rdev->pdev, rdev->rio_mem);
   1540 	rdev->rio_mem = NULL;
   1541 	iounmap(rdev->rmmio);
   1542 	rdev->rmmio = NULL;
   1543 #endif
   1544 	if (rdev->family >= CHIP_BONAIRE)
   1545 		radeon_doorbell_fini(rdev);
   1546 	radeon_debugfs_remove_files(rdev);
   1547 
   1548 #ifdef __NetBSD__
   1549 	DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
   1550 	spin_lock_destroy(&rdev->irq.vblank_lock);
   1551 	destroy_rwsem(&rdev->exclusive_lock);
   1552 	destroy_rwsem(&rdev->pm.mclk_lock);
   1553 	linux_mutex_destroy(&rdev->srbm_mutex);
   1554 	linux_mutex_destroy(&rdev->gpu_clock_mutex);
   1555 	linux_mutex_destroy(&rdev->pm.mutex);
   1556 	linux_mutex_destroy(&rdev->gem.mutex);
   1557 	linux_mutex_destroy(&rdev->dc_hw_i2c_mutex);
   1558 	linux_mutex_destroy(&rdev->ring_lock);
   1559 #else
   1560 	mutex_destroy(&rdev->srbm_mutex);
   1561 	mutex_destroy(&rdev->gpu_clock_mutex);
   1562 	mutex_destroy(&rdev->pm.mutex);
   1563 	mutex_destroy(&rdev->gem.mutex);
   1564 	mutex_destroy(&rdev->dc_hw_i2c_mutex);
   1565 	mutex_destroy(&rdev->ring_lock);
   1566 #endif
   1567 }
   1568 
   1569 
   1570 /*
   1571  * Suspend & resume.
   1572  */
   1573 /**
   1574  * radeon_suspend_kms - initiate device suspend
   1575  *
   1576  * @dev: drm dev pointer
   1577  * @suspend: true to also put the PCI device into a low power state
   1578  *
   1579  * Puts the hw in the suspend state (all asics).
   1580  * Returns 0 for success or an error on failure.
   1581  * Called at driver suspend.
   1582  */
   1583 int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
   1584 {
   1585 	struct radeon_device *rdev;
   1586 	struct drm_crtc *crtc;
   1587 	struct drm_connector *connector;
   1588 	int i, r;
   1589 	bool force_completion = false;
   1590 
   1591 	if (dev == NULL || dev->dev_private == NULL) {
   1592 		return -ENODEV;
   1593 	}
   1594 
   1595 	rdev = dev->dev_private;
   1596 
   1597 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1598 		return 0;
   1599 
   1600 	drm_kms_helper_poll_disable(dev);
   1601 
   1602 	/* turn off display hw */
   1603 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1604 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
   1605 	}
   1606 
   1607 	/* unpin the front buffers */
   1608 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1609 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
   1610 		struct radeon_bo *robj;
   1611 
   1612 		if (rfb == NULL || rfb->obj == NULL) {
   1613 			continue;
   1614 		}
   1615 		robj = gem_to_radeon_bo(rfb->obj);
   1616 		/* don't unpin kernel fb objects */
   1617 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
   1618 			r = radeon_bo_reserve(robj, false);
   1619 			if (r == 0) {
   1620 				radeon_bo_unpin(robj);
   1621 				radeon_bo_unreserve(robj);
   1622 			}
   1623 		}
   1624 	}
   1625 	/* evict vram memory */
   1626 	radeon_bo_evict_vram(rdev);
   1627 
   1628 	/* wait for gpu to finish processing current batch */
   1629 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1630 		r = radeon_fence_wait_empty(rdev, i);
   1631 		if (r) {
   1632 			/* delay GPU reset to resume */
   1633 			force_completion = true;
   1634 		}
   1635 	}
   1636 	if (force_completion) {
   1637 		radeon_fence_driver_force_completion(rdev);
   1638 	}
   1639 
   1640 	radeon_save_bios_scratch_regs(rdev);
   1641 
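	/*
	 * Asic-specific suspend (rdev->asic->suspend): stop the rings and
	 * shut down the engines before hotplug detection is torn down and
	 * the remaining VRAM contents are evicted.
	 */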
   1642 	radeon_suspend(rdev);
   1643 	radeon_hpd_fini(rdev);
   1644 	/* evict remaining vram memory */
   1645 	radeon_bo_evict_vram(rdev);
   1646 
   1647 	radeon_agp_suspend(rdev);
   1648 
   1649 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1650 	pci_save_state(dev->pdev);
   1651 	if (suspend) {
   1652 		/* Shut down the device */
   1653 		pci_disable_device(dev->pdev);
   1654 		pci_set_power_state(dev->pdev, PCI_D3hot);
   1655 	}
   1656 #endif
   1657 
   1658 #ifndef __NetBSD__		/* XXX radeon fb */
   1659 	if (fbcon) {
   1660 		console_lock();
   1661 		radeon_fbdev_set_suspend(rdev, 1);
   1662 		console_unlock();
   1663 	}
   1664 #endif
   1665 	return 0;
   1666 }
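
/*
 * Illustrative sketch only (not compiled): roughly how a Linux PM hook
 * might call radeon_suspend_kms().  The real wiring lives in radeon_drv.c
 * (or in the NetBSD pmf glue) and may differ; example_pmops_suspend is a
 * hypothetical name.
 */
#if 0
static int example_pmops_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/* suspend=true: power the PCI device down; fbcon=true: suspend fbdev */
	return radeon_suspend_kms(drm_dev, true, true);
}
#endif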
   1667 
    1668 /**
    1669  * radeon_resume_kms - initiate device resume
    1670  *
    1671  * @dev: drm dev pointer
          * @resume: true to also bring the PCI device back to full power
          * @fbcon: true to also resume the fbdev console and restore the display modes
    1672  *
    1673  * Bring the hw back to operating state (all asics).
    1674  * Returns 0 for success or an error on failure.
    1675  * Called at driver resume.
    1676  */
   1677 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
   1678 {
   1679 	struct drm_connector *connector;
   1680 	struct radeon_device *rdev = dev->dev_private;
   1681 	int r;
   1682 
   1683 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
   1684 		return 0;
   1685 
   1686 #ifndef __NetBSD__		/* XXX radeon fb */
   1687 	if (fbcon) {
   1688 		console_lock();
   1689 	}
   1690 #endif
   1691 #ifndef __NetBSD__		/* pmf handles this for us.  */
   1692 	if (resume) {
   1693 		pci_set_power_state(dev->pdev, PCI_D0);
   1694 		pci_restore_state(dev->pdev);
   1695 		if (pci_enable_device(dev->pdev)) {
   1696 			if (fbcon)
   1697 				console_unlock();
   1698 			return -1;
   1699 		}
   1700 	}
   1701 #endif
   1702 	/* resume AGP if in use */
   1703 	radeon_agp_resume(rdev);
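	/* asic-specific resume (rdev->asic->resume): re-init and restart the engines */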
   1704 	radeon_resume(rdev);
   1705 
   1706 	r = radeon_ib_ring_tests(rdev);
   1707 	if (r)
   1708 		DRM_ERROR("ib ring test failed (%d).\n", r);
   1709 
   1710 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
   1711 		/* do dpm late init */
   1712 		r = radeon_pm_late_init(rdev);
   1713 		if (r) {
   1714 			rdev->pm.dpm_enabled = false;
   1715 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1716 		}
   1717 	} else {
   1718 		/* resume old pm late */
   1719 		radeon_pm_resume(rdev);
   1720 	}
   1721 
   1722 	radeon_restore_bios_scratch_regs(rdev);
   1723 
   1724 	/* init dig PHYs, disp eng pll */
   1725 	if (rdev->is_atom_bios) {
   1726 		radeon_atom_encoder_init(rdev);
   1727 		radeon_atom_disp_eng_pll_init(rdev);
   1728 		/* turn on the BL */
   1729 		if (rdev->mode_info.bl_encoder) {
   1730 			u8 bl_level = radeon_get_backlight_level(rdev,
   1731 								 rdev->mode_info.bl_encoder);
   1732 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
   1733 						   bl_level);
   1734 		}
   1735 	}
   1736 	/* reset hpd state */
   1737 	radeon_hpd_init(rdev);
    1738 	/* force the saved display modes back onto the crtcs */
   1739 	if (fbcon) {
   1740 		drm_helper_resume_force_mode(dev);
   1741 		/* turn on display hw */
   1742 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
   1743 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
   1744 		}
   1745 	}
   1746 
   1747 	drm_kms_helper_poll_enable(dev);
   1748 
   1749 	/* set the power state here in case we are a PX system or headless */
   1750 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
   1751 		radeon_pm_compute_clocks(rdev);
   1752 
   1753 #ifndef __NetBSD__		/* XXX radeon fb */
   1754 	if (fbcon) {
   1755 		radeon_fbdev_set_suspend(rdev, 0);
   1756 		console_unlock();
   1757 	}
   1758 #endif
   1759 
   1760 	return 0;
   1761 }
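
/*
 * Illustrative sketch only (not compiled): the resume counterpart of the
 * hypothetical PM hook sketched after radeon_suspend_kms() above; the real
 * wiring lives in radeon_drv.c (or in the NetBSD pmf glue).
 */
#if 0
static int example_pmops_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/* resume=true: bring the PCI device back up; fbcon=true: restore fbdev */
	return radeon_resume_kms(drm_dev, true, true);
}
#endif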
   1762 
   1763 /**
   1764  * radeon_gpu_reset - reset the asic
   1765  *
    1766  * @rdev: radeon_device pointer
    1767  *
    1768  * Attempt to reset the GPU if it has hung (all asics).
   1769  * Returns 0 for success or an error on failure.
   1770  */
   1771 int radeon_gpu_reset(struct radeon_device *rdev)
   1772 {
   1773 	unsigned ring_sizes[RADEON_NUM_RINGS];
   1774 	uint32_t *ring_data[RADEON_NUM_RINGS];
   1775 
   1776 	bool saved = false;
   1777 
   1778 	int i, r;
   1779 	int resched;
   1780 
   1781 	down_write(&rdev->exclusive_lock);
   1782 
   1783 	if (!rdev->needs_reset) {
   1784 		up_write(&rdev->exclusive_lock);
   1785 		return 0;
   1786 	}
   1787 
   1788 	rdev->needs_reset = false;
   1789 
   1790 	radeon_save_bios_scratch_regs(rdev);
   1791 	/* block TTM */
   1792 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
   1793 	radeon_pm_suspend(rdev);
   1794 	radeon_suspend(rdev);
   1795 
   1796 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1797 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
   1798 						   &ring_data[i]);
   1799 		if (ring_sizes[i]) {
   1800 			saved = true;
   1801 			dev_info(rdev->dev, "Saved %d dwords of commands "
   1802 				 "on ring %d.\n", ring_sizes[i], i);
   1803 		}
   1804 	}
   1805 
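	/*
	 * Reset point: on the first pass the commands saved above are
	 * restored after the reset; if that makes the IB tests fail we
	 * come back here and redo the reset without them.
	 */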
   1806 retry:
   1807 	r = radeon_asic_reset(rdev);
   1808 	if (!r) {
   1809 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
   1810 		radeon_resume(rdev);
   1811 	}
   1812 
   1813 	radeon_restore_bios_scratch_regs(rdev);
   1814 
   1815 	if (!r) {
   1816 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1817 			radeon_ring_restore(rdev, &rdev->ring[i],
   1818 					    ring_sizes[i], ring_data[i]);
   1819 			ring_sizes[i] = 0;
   1820 			ring_data[i] = NULL;
   1821 		}
   1822 
   1823 		r = radeon_ib_ring_tests(rdev);
   1824 		if (r) {
   1825 			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
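			/*
			 * The restored commands may themselves be what hung
			 * the GPU: drop them and retry a clean reset.
			 */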
   1826 			if (saved) {
   1827 				saved = false;
   1828 				radeon_suspend(rdev);
   1829 				goto retry;
   1830 			}
   1831 		}
   1832 	} else {
   1833 		radeon_fence_driver_force_completion(rdev);
   1834 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1835 			kfree(ring_data[i]);
   1836 		}
   1837 	}
   1838 
   1839 	radeon_pm_resume(rdev);
   1840 	drm_helper_resume_force_mode(rdev->ddev);
   1841 
   1842 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
   1843 	if (r) {
    1844 		/* bad news; how do we tell userspace? */
   1845 		dev_info(rdev->dev, "GPU reset failed\n");
   1846 	}
   1847 
   1848 	up_write(&rdev->exclusive_lock);
   1849 	return r;
   1850 }
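
/*
 * Illustrative sketch only (not compiled): a caller that has decided the
 * GPU is hung flags the reset and then invokes it; example_handle_lockup
 * is a hypothetical helper, not part of the driver.
 */
#if 0
static int example_handle_lockup(struct radeon_device *rdev)
{
	int r;

	rdev->needs_reset = true;	/* radeon_gpu_reset() bails out early otherwise */
	r = radeon_gpu_reset(rdev);
	if (r)
		dev_err(rdev->dev, "example: GPU reset failed (%d)\n", r);
	return r;
}
#endif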
   1851 
   1852 
   1853 /*
   1854  * Debugfs
   1855  */
   1856 int radeon_debugfs_add_files(struct radeon_device *rdev,
   1857 			     struct drm_info_list *files,
   1858 			     unsigned nfiles)
   1859 {
   1860 	unsigned i;
   1861 
   1862 	for (i = 0; i < rdev->debugfs_count; i++) {
   1863 		if (rdev->debugfs[i].files == files) {
   1864 			/* Already registered */
   1865 			return 0;
   1866 		}
   1867 	}
   1868 
   1869 	i = rdev->debugfs_count + 1;
   1870 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
   1871 		DRM_ERROR("Reached maximum number of debugfs components.\n");
    1872 		DRM_ERROR("Please report this so we can increase "
    1873 		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
   1874 		return -EINVAL;
   1875 	}
   1876 	rdev->debugfs[rdev->debugfs_count].files = files;
   1877 	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
   1878 	rdev->debugfs_count = i;
   1879 #if defined(CONFIG_DEBUG_FS)
   1880 	drm_debugfs_create_files(files, nfiles,
   1881 				 rdev->ddev->control->debugfs_root,
   1882 				 rdev->ddev->control);
   1883 	drm_debugfs_create_files(files, nfiles,
   1884 				 rdev->ddev->primary->debugfs_root,
   1885 				 rdev->ddev->primary);
   1886 #endif
   1887 	return 0;
   1888 }
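
/*
 * Illustrative sketch only (not compiled): how a component would register a
 * debugfs file through radeon_debugfs_add_files().  The entry name and the
 * example_* identifiers below are hypothetical.
 */
#if 0
static int example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "vram size: %llu bytes\n",
		   (unsigned long long)rdev->mc.real_vram_size);
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{ "radeon_example", example_debugfs_show, 0, NULL },
};

static int example_debugfs_register(struct radeon_device *rdev)
{
	return radeon_debugfs_add_files(rdev, example_debugfs_list,
					ARRAY_SIZE(example_debugfs_list));
}
#endif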
   1889 
   1890 static void radeon_debugfs_remove_files(struct radeon_device *rdev)
   1891 {
   1892 #if defined(CONFIG_DEBUG_FS)
   1893 	unsigned i;
   1894 
   1895 	for (i = 0; i < rdev->debugfs_count; i++) {
   1896 		drm_debugfs_remove_files(rdev->debugfs[i].files,
   1897 					 rdev->debugfs[i].num_files,
   1898 					 rdev->ddev->control);
   1899 		drm_debugfs_remove_files(rdev->debugfs[i].files,
   1900 					 rdev->debugfs[i].num_files,
   1901 					 rdev->ddev->primary);
   1902 	}
   1903 #endif
   1904 }
   1905 
   1906 #if defined(CONFIG_DEBUG_FS)
   1907 int radeon_debugfs_init(struct drm_minor *minor)
   1908 {
   1909 	return 0;
   1910 }
   1911 
   1912 void radeon_debugfs_cleanup(struct drm_minor *minor)
   1913 {
   1914 }
   1915 #endif
   1916