| /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ | 
| amdgpu_gfxhub_v1_1.c |
59 		adev->gmc.xgmi.num_physical_nodes = max_region + 1;
60 		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
 63 		adev->gmc.xgmi.physical_node_id =
 65 		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
 67 		adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
 
 | 
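The amdgpu_gfxhub_v1_1.c hits derive the XGMI topology (node count, node id, segment size) from hardware register fields. In amdgpu, REG_GET_FIELD() is a mask-and-shift over a 32-bit register value; the standalone sketch below models that idiom with an invented field layout (the masks, shifts and register value are illustrative, not the real MC_VM_XGMI definitions from the sh_mask headers).

/* Toy model of the REG_GET_FIELD() mask-and-shift idiom.
 * Field positions here are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(val, mask, shift)	(((val) & (mask)) >> (shift))

int main(void)
{
	uint32_t reg = 0x00000013;	/* hypothetical register value */

	/* bits 2:0 -> largest populated region index */
	uint32_t max_region = FIELD_GET(reg, 0x00000007u, 0);
	/* bits 7:4 -> this device's node id */
	uint32_t node_id = FIELD_GET(reg, 0x000000f0u, 4);

	/* mirrors gmc.xgmi.num_physical_nodes = max_region + 1 */
	printf("nodes=%u this_node=%u\n", max_region + 1, node_id);
	return 0;
}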
| amdgpu_xgmi.c |
140 	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
245 	if (!adev->gmc.xgmi.hive_id)
 252 		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
 273 	tmp->hive_id = adev->gmc.xgmi.hive_id;
 311 			adev->gmc.xgmi.node_id,
 312 			adev->gmc.xgmi.hive_id, ret);
 323 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 349 			adev->gmc.xgmi.node_id,
 350 			adev->gmc.xgmi.hive_id, ret);
 363 		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id
 [all...]
 | 
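The amdgpu_xgmi.c hits trace the hive lookup path: a device with a non-zero hive_id is matched against the known hives, and on a match it is linked into that hive's device list. A minimal userspace model of that lookup follows; the struct and the array-based "hive list" are illustrative stand-ins, not the driver's amdgpu_hive_info or its kernel list handling.

/* Minimal model of XGMI hive matching: same hive_id => same hive. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hive {
	uint64_t hive_id;
	unsigned num_devices;
};

static struct hive *find_hive(struct hive *hives, size_t n, uint64_t hive_id)
{
	if (!hive_id)			/* not on an XGMI fabric at all */
		return NULL;
	for (size_t i = 0; i < n; i++)
		if (hives[i].hive_id == hive_id)
			return &hives[i];
	return NULL;			/* driver would allocate one here */
}

int main(void)
{
	struct hive hives[] = { { 0x1122334455667788ULL, 2 } };
	struct hive *h = find_hive(hives, 1, 0x1122334455667788ULL);

	if (h) {
		h->num_devices++;	/* joined: stands in for list_add_tail() */
		printf("joined hive %llx, now %u nodes\n",
		       (unsigned long long)h->hive_id, h->num_devices);
	}
	return 0;
}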
| amdgpu_xgmi.h |
58 		adev->gmc.xgmi.hive_id &&
59 		adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
 
 | 
| amdgpu_gmc_v7_0.c |
45 #include "gmc/gmc_7_1_d.h"
46 #include "gmc/gmc_7_1_sh_mask.h"
 164 	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 167 	err = amdgpu_ucode_validate(adev->gmc.fw);
 172 		release_firmware(adev->gmc.fw);
 173 		adev->gmc.fw = NULL;
 194 	if (!adev->gmc.fw)
 197 	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 200 	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 203 		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes))
 [all...]
 | 
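The gmc_v7_0.c hits (and the identical paths in the gmc_v6_0.c and gmc_v8_0.c blocks below) show the MC firmware load pattern: request the blob, validate it, and on any failure release it and clear adev->gmc.fw so no half-validated image is kept. A userspace analogue follows; load_blob() and validate_blob() are stand-ins for request_firmware() and amdgpu_ucode_validate(), not kernel APIs.

/* Userspace analogue of the load/validate/release-on-error pattern. */
#include <stdio.h>
#include <stdlib.h>

struct blob { size_t size; unsigned char *data; };

static int load_blob(struct blob **out, const char *name)
{
	struct blob *b = calloc(1, sizeof(*b));
	if (!b)
		return -1;
	(void)name;		/* pretend we read the named file here */
	*out = b;
	return 0;
}

static int validate_blob(const struct blob *b) { return b ? 0 : -1; }
static void release_blob(struct blob *b) { free(b); }

int main(void)
{
	struct blob *fw = NULL;
	int err = load_blob(&fw, "example_mc.bin");	/* hypothetical name */

	if (!err)
		err = validate_blob(fw);
	if (err) {
		fprintf(stderr, "mc: failed to load firmware\n");
		release_blob(fw);
		fw = NULL;	/* mirrors adev->gmc.fw = NULL */
	}
	return err;
}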
| amdgpu_gmc.h |
68  * GMC page fault information
226 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
 228 	((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
 230 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 231 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 232 #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
 233 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
 234 #define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
 244 static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
 246 	WARN_ON(gmc->real_vram_size < gmc->visible_vram_size)
 [all...]
 | 
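The amdgpu_gmc.h hits are the dispatch layer: each amdgpu_gmc_* macro expands to a call through a per-ASIC function pointer in gmc_funcs, which is why gmc_v9_0.c and gmc_v10_0.c below each install their own funcs table. A standalone sketch of the idiom follows; the struct layout is a toy, not the real struct amdgpu_gmc, and the WARN_ON() in the full-visibility helper is modeled with assert().

/* Sketch of function-pointer dispatch hidden behind a macro. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gmc;

struct gmc_funcs {
	void (*flush_gpu_tlb)(struct gmc *g, uint32_t vmid);
};

struct gmc {
	const struct gmc_funcs *gmc_funcs;
	uint64_t real_vram_size;
	uint64_t visible_vram_size;
};

/* same shape as amdgpu_gmc_flush_gpu_tlb(), minus vmhub/flush type */
#define gmc_flush_gpu_tlb(g, vmid) \
	((g)->gmc_funcs->flush_gpu_tlb((g), (vmid)))

/* mirrors amdgpu_gmc_vram_full_visible(): all VRAM is CPU-visible
 * exactly when the visible size equals the real size */
static inline bool gmc_vram_full_visible(const struct gmc *gmc)
{
	assert(gmc->real_vram_size >= gmc->visible_vram_size);
	return gmc->real_vram_size == gmc->visible_vram_size;
}

static void v9_flush(struct gmc *g, uint32_t vmid)
{
	(void)g;
	printf("flush TLB, vmid %u\n", vmid);
}

static const struct gmc_funcs v9_funcs = { .flush_gpu_tlb = v9_flush };

int main(void)
{
	struct gmc g = {
		.gmc_funcs = &v9_funcs,
		.real_vram_size = 8ULL << 30,
		.visible_vram_size = 256ULL << 20,
	};

	gmc_flush_gpu_tlb(&g, 3);
	printf("full visible: %d\n", gmc_vram_full_visible(&g));
	return 0;
}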
| amdgpu_gmc_v9_0.c |
405 	adev->gmc.vm_fault.num_types = 1;
406 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 409 		adev->gmc.ecc_irq.num_types = 1;
 410 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
 506 	spin_lock(&adev->gmc.invalidate_lock);
 553 	spin_unlock(&adev->gmc.invalidate_lock);
 742 			adev->gmc.vram_start;
 745 	if (!adev->gmc.translate_further)
 794 	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 847 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL
 [all...]
 | 
| amdgpu_gmc.c |
154 	if (addr + PAGE_SIZE >= adev->gmc.agp_size)
157 	return adev->gmc.agp_start + addr;
 205 	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
 295 	struct amdgpu_gmc *gmc = &adev->gmc;  local in function:amdgpu_gmc_filter_faults
 304 	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
 309 	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
 317 		fault = &gmc->fault_ring[fault->next]
 [all...]
 | 
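The amdgpu_gmc_filter_faults() hits show the shape of the duplicate-fault filter: recent faults sit in a small ring, a hash bucket points at the newest entry for a key, and the chain of ring indices is walked until an entry is stale. The simplified model below keeps that shape; the ring/hash sizes, the hash function, the TIMEOUT value and the bounded chain walk are illustrative choices, not the driver's exact logic.

/* Simplified duplicate-fault filter: hash buckets chaining into a ring. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SZ		16u
#define HASH_SZ		16u
#define TIMEOUT		1000u	/* expiry window, arbitrary units */

struct fault {
	uint64_t key;		/* e.g. faulting page combined with pasid */
	uint64_t timestamp;
	uint8_t  next;		/* ring index of the next-older entry */
};

static struct fault ring[RING_SZ];
static uint8_t hash_head[HASH_SZ];
static uint8_t last_slot;

/* Returns true when (key, stamp) repeats a recent fault and should be
 * dropped instead of being processed again. */
static bool filter_fault(uint64_t key, uint64_t stamp)
{
	uint8_t h = (uint8_t)(key % HASH_SZ);
	uint8_t i = hash_head[h];

	for (unsigned hops = 0; hops < RING_SZ; hops++) {
		const struct fault *f = &ring[i];

		if (f->timestamp == 0 || f->timestamp + TIMEOUT <= stamp)
			break;		/* empty or expired: end of chain */
		if (f->key == key)
			return true;	/* seen recently: filter it out */
		i = f->next;
	}

	/* New fault: recycle the oldest slot, make it the bucket head. */
	i = last_slot;
	last_slot = (uint8_t)((last_slot + 1) % RING_SZ);
	ring[i].key = key;
	ring[i].timestamp = stamp;
	ring[i].next = hash_head[h];
	hash_head[h] = i;
	return false;
}

int main(void)
{
	printf("%d\n", filter_fault(0x1000, 100));	/* 0: first sighting */
	printf("%d\n", filter_fault(0x1000, 150));	/* 1: duplicate */
	printf("%d\n", filter_fault(0x1000, 1200));	/* 0: entry expired */
	return 0;
}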
| amdgpu_gmc_v8_0.c |
40 #include "gmc/gmc_8_1_d.h"
41 #include "gmc/gmc_8_1_sh_mask.h"
 281 	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 284 	err = amdgpu_ucode_validate(adev->gmc.fw);
 289 		release_firmware(adev->gmc.fw);
 290 		adev->gmc.fw = NULL;
 319 	if (!adev->gmc.fw)
 322 	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 325 	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 328 		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes))
 [all...]
 | 
| amdgpu_gmc_v10_0.c |
206 	adev->gmc.vm_fault.num_types = 1;
207 	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
 276 	spin_lock(&adev->gmc.invalidate_lock);
 325 	spin_unlock(&adev->gmc.invalidate_lock);
 574 			adev->gmc.vram_start;
 577 	if (!adev->gmc.translate_further)
 624 	if (adev->gmc.gmc_funcs == NULL)
 625 		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
 635 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 636 	adev->gmc.shared_aperture_end
 [all...]
 | 
| amdgpu_gmc_v6_0.c |
43 #include "gmc/gmc_6_0_d.h"
44 #include "gmc/gmc_6_0_sh_mask.h"
 150 	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 154 	err = amdgpu_ucode_validate(adev->gmc.fw);
 161 		release_firmware(adev->gmc.fw);
 162 		adev->gmc.fw = NULL;
 175 	if (!adev->gmc.fw)
 178 	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 182 	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 185 		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes))
 [all...]
 | 
| amdgpu_gfxhub_v1_0.c |
64 		     (u32)(adev->gmc.gart_start >> 12));
66 		     (u32)(adev->gmc.gart_start >> 44));
 69 		     (u32)(adev->gmc.gart_end >> 12));
 71 		     (u32)(adev->gmc.gart_end >> 44));
 80 	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
 81 	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 86 			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 98 					 max((adev->gmc.fb_end >> 18) + 0x1,
 99 					     adev->gmc.agp_end >> 18))
 [all...]
 | 
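The gfxhub_v1_0 hits (and the matching gfxhub_v2_0, mmhub_v1_0, mmhub_v2_0 and mmhub_v9_4 blocks below) program the same aperture registers: a GART boundary is written as a 4 KiB page frame number split across LO32 (>> 12) and HI32 (>> 44) register halves, while the AGP and FB windows use coarser 16 MiB (>> 24) and 256 KiB (>> 18) granules. The arithmetic demo below just verifies that the LO/HI split is lossless for page-aligned 48-bit addresses; register names and writes are omitted.

/* LO32/HI32 split of a 4 KiB-aligned GPU virtual address. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gart_start = 0x123456789000ULL;	/* 4 KiB-aligned example */

	uint32_t lo32 = (uint32_t)(gart_start >> 12);	/* PFN bits 31:0  */
	uint32_t hi32 = (uint32_t)(gart_start >> 44);	/* PFN bits 63:32 */

	/* Reassemble to check that nothing was lost in the split. */
	uint64_t back = ((uint64_t)hi32 << 44) | ((uint64_t)lo32 << 12);

	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 " back=0x%012" PRIx64 "\n",
	       lo32, hi32, back);
	return back == gart_start ? 0 : 1;
}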
| amdgpu_object.c |
141 		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
591 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 593 	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 1076 	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
 1077 				   adev->gmc.aper_size);
 1080 	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
 1081 					      adev->gmc.aper_size);
 1083 	if (adev->gmc.aper_base)
 1084 		pmap_pv_track(adev->gmc.aper_base, adev->gmc.aper_size)
 [all...]
 | 
| amdgpu_test.c |
52 	n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
164 					  (gart_addr - adev->gmc.gart_start +
 167 					  (vram_addr - adev->gmc.vram_start +
 210 					  (vram_addr - adev->gmc.vram_start +
 213 					  (gart_addr - adev->gmc.gart_start +
 223 			 gart_addr - adev->gmc.gart_start);
 
 | 
| amdgpu_gfxhub_v2_0.c |
75 		     (u32)(adev->gmc.gart_start >> 12));
77 		     (u32)(adev->gmc.gart_start >> 44));
 80 		     (u32)(adev->gmc.gart_end >> 12));
 82 		     (u32)(adev->gmc.gart_end >> 44));
 96 		     adev->gmc.vram_start >> 18);
 98 		     adev->gmc.vram_end >> 18);
 101 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
 163 	if (adev->gmc.translate_further) {
 271 			     adev->gmc.vram_start >> 24);
 273 			     adev->gmc.vram_end >> 24)
 [all...]
 | 
| amdgpu_mmhub_v2_0.c |
60 		     (u32)(adev->gmc.gart_start >> 12));
62 		     (u32)(adev->gmc.gart_start >> 44));
 65 		     (u32)(adev->gmc.gart_end >> 12));
 67 		     (u32)(adev->gmc.gart_end >> 44));
 82 		     adev->gmc.vram_start >> 18);
 84 		     adev->gmc.vram_end >> 18);
 87 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
 150 	if (adev->gmc.translate_further) {
 262 			     adev->gmc.vram_start >> 24);
 264 			     adev->gmc.vram_end >> 24)
 [all...]
 | 
| amdgpu_amdkfd.c |
77 		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
394 	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
 397 	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
 398 		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
 399 		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
 400 				adev->gmc.visible_vram_size;
 403 		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
 405 	mem_info->vram_width = adev->gmc.vram_width;
 408 			&adev->gmc.aper_base, &aper_limit
 [all...]
 | 
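The amdgpu_amdkfd.c hits report local memory to KFD as a public/private split: if the whole VRAM aperture lies below the address mask, the CPU-visible slice is "public" and the remainder "private"; otherwise everything is private. A self-contained model with made-up sizes and mask follows (the real driver takes the mask from the device's DMA mask).

/* Model of the KFD public/private local-memory split. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t address_mask = ~((1ULL << 40) - 1);	/* bits >= 40 unusable */
	uint64_t aper_base = 0x80000000ULL;		/* example BAR placement */
	uint64_t aper_size = 256ULL << 20;
	uint64_t visible = 256ULL << 20;
	uint64_t real = 4ULL << 30;
	uint64_t aper_limit = aper_base + aper_size;
	uint64_t pub, priv;

	if (!((aper_base & address_mask) || (aper_limit & address_mask))) {
		pub = visible;		/* aperture fully addressable */
		priv = real - visible;
	} else {
		pub = 0;		/* out of range: nothing is public */
		priv = real;
	}
	printf("public=%llu MiB private=%llu MiB\n",
	       (unsigned long long)(pub >> 20),
	       (unsigned long long)(priv >> 20));
	return 0;
}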
| amdgpu_vram_mgr.c |
58 	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
75 	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
 121 	switch (adev->gmc.vram_vendor) {
 263 	if (start >= adev->gmc.visible_vram_size)
 266 	return (end > adev->gmc.visible_vram_size ?
 267 		adev->gmc.visible_vram_size : end) - start;
 286 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 289 	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 349 	max_bytes = adev->gmc.mc_vram_size;
 
 | 
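The clamp at lines 263-267 of amdgpu_vram_mgr.c computes how much of an allocation is CPU-visible: the share of [start, end) that falls below visible_vram_size, or zero when the block starts above it. The same logic, extracted into a standalone demo with made-up sizes:

/* Visible-VRAM share of a [start, end) block, clamped to the BAR. */
#include <stdint.h>
#include <stdio.h>

static uint64_t vis_size(uint64_t start, uint64_t end, uint64_t visible)
{
	if (start >= visible)
		return 0;		/* block entirely above the BAR */
	return (end > visible ? visible : end) - start;
}

int main(void)
{
	uint64_t visible = 256ULL << 20;	/* 256 MiB visible window */

	printf("%llu MiB\n", (unsigned long long)
	       (vis_size(0, 128ULL << 20, visible) >> 20));		/* 128 */
	printf("%llu MiB\n", (unsigned long long)
	       (vis_size(192ULL << 20, 320ULL << 20, visible) >> 20));	/* 64 */
	printf("%llu MiB\n", (unsigned long long)
	       (vis_size(512ULL << 20, 640ULL << 20, visible) >> 20));	/* 0 */
	return 0;
}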
| amdgpu_mmhub_v1_0.c |
53 	adev->gmc.fb_start = base;
54 	adev->gmc.fb_end = top;
 80 		     (u32)(adev->gmc.gart_start >> 12));
 82 		     (u32)(adev->gmc.gart_start >> 44));
 85 		     (u32)(adev->gmc.gart_end >> 12));
 87 		     (u32)(adev->gmc.gart_end >> 44));
 97 	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
 98 	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 102 		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18)
 [all...]
 | 
| amdgpu_fb.c |
289 	tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
290 	info->fix.smem_start = adev->gmc.aper_base + tmp;
 299 	info->apertures->ranges[0].size = adev->gmc.aper_size;
 309 	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)adev->gmc.aper_base);
 367 	if (adev->gmc.real_vram_size <= (32*1024*1024))
 
 | 
| amdgpu_ttm.c |
112 		man->gpu_offset = adev->gmc.gart_start;
120 		man->gpu_offset = adev->gmc.vram_start;
 191 		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 202 			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 618 		<= adev->gmc.visible_vram_size;
 736 		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
 747 		mem->bus.base = adev->gmc.aper_base;
 1185 		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 1697 	while (len && pos < adev->gmc.mc_vram_size) {
 1786 	uint64_t vram_size = adev->gmc.visible_vram_size
 [all...]
 | 
| amdgpu_mmhub_v9_4.c |
56 	adev->gmc.fb_start = base;
57 	adev->gmc.fb_end = top;
 93 			    (u32)(adev->gmc.gart_start >> 12));
 97 			    (u32)(adev->gmc.gart_start >> 44));
 102 			    (u32)(adev->gmc.gart_end >> 12));
 106 			    (u32)(adev->gmc.gart_end >> 44));
 131 			    adev->gmc.agp_end >> 24);
 134 			    adev->gmc.agp_start >> 24);
 141 			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18)
 [all...]
 | 
| amdgpu_umc.c |
60 		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
 | 
| amdgpu_kms.c |
234 		fw_info->ver = adev->gmc.fw_version;
606 		vram_gtt.vram_size = adev->gmc.real_vram_size -
 610 			min(adev->gmc.visible_vram_size -
 623 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
 624 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
 632 			adev->gmc.visible_vram_size;
 634 			min(adev->gmc.visible_vram_size -
 753 		dev_info.vram_type = adev->gmc.vram_type;
 754 		dev_info.vram_bit_width = adev->gmc.vram_width;
 1270 	/* GMC */
 [all...]
 | 
| amdgpu_device.c |
938 	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
2004 		/* need to do gmc hw init early so we can allocate gpu mem */
 2023 			/* right after GMC hw init, we create CSA */
 2075 	if (adev->gmc.xgmi.num_physical_nodes > 1)
 2269 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
 2285 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
 2322 	if (adev->gmc.xgmi.num_physical_nodes > 1)
 2629  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
 2665  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
 2960 	adev->gmc.gart_size = 512 * 1024 * 1024
 [all...]
 | 
| /src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/ | 
| tegra20-trimslice.dts |
104 			gmc {
105 				nvidia,pins = "gmc", "gmd";
 212 					"gma", "gmc", "gmd", "gpu", "gpu7",
 
 |