/*	$NetBSD: intel_region_lmem.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_region_lmem.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $");

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "intel_region_lmem.h"

static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	unsigned long n;
	int ret;

	/* We want to 1:1 map the mappable aperture to our reserved region */

	mem->fake_mappable.start = 0;
	mem->fake_mappable.size = resource_size(&mem->region);
	mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

	ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
	if (ret)
		return ret;

	mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
					   mem->region.start,
					   mem->fake_mappable.size,
					   PCI_DMA_BIDIRECTIONAL,
					   DMA_ATTR_FORCE_CONTIGUOUS);
	if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
		drm_mm_remove_node(&mem->fake_mappable);
		return -EINVAL;
	}

	for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
		ggtt->vm.insert_page(&ggtt->vm,
				     mem->remap_addr + (n << PAGE_SHIFT),
				     n << PAGE_SHIFT,
				     I915_CACHE_NONE, 0);
	}

	mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
						      mem->fake_mappable.size);

	return 0;
}

static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
	if (!drm_mm_node_allocated(&mem->fake_mappable))
		return;

	drm_mm_remove_node(&mem->fake_mappable);

	dma_unmap_resource(&mem->i915->drm.pdev->dev,
			   mem->remap_addr,
			   mem->fake_mappable.size,
			   PCI_DMA_BIDIRECTIONAL,
			   DMA_ATTR_FORCE_CONTIGUOUS);
}

static void
region_lmem_release(struct intel_memory_region *mem)
{
	release_fake_lmem_bar(mem);
	io_mapping_fini(&mem->iomap);
	intel_memory_region_release_buddy(mem);
}

static int
region_lmem_init(struct intel_memory_region *mem)
{
	int ret;

	if (i915_modparams.fake_lmem_start) {
		ret = init_fake_lmem_bar(mem);
		GEM_BUG_ON(ret);
	}

	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				resource_size(&mem->region)))
		return -EIO;

	ret = intel_memory_region_init_buddy(mem);
	if (ret)
		io_mapping_fini(&mem->iomap);

	intel_memory_region_set_name(mem, "local");

	return ret;
}

const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.create_object = __i915_gem_lmem_object_create,
};

struct intel_memory_region *
intel_setup_fake_lmem(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct intel_memory_region *mem;
	resource_size_t mappable_end;
	resource_size_t io_start;
	resource_size_t start;

	GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
	GEM_BUG_ON(!i915_modparams.fake_lmem_start);

	/* Your mappable aperture belongs to me now! */
	mappable_end = pci_resource_len(pdev, 2);
	io_start = pci_resource_start(pdev, 2);
	start = i915_modparams.fake_lmem_start;

	mem = intel_memory_region_create(i915,
					 start,
					 mappable_end,
					 PAGE_SIZE,
					 io_start,
					 &intel_region_lmem_ops);
	if (!IS_ERR(mem)) {
		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
			 &mem->region);
		drm_info(&i915->drm,
			 "Intel graphics fake LMEM IO start: %llx\n",
			 (u64)mem->io_start);
		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
			 (u64)resource_size(&mem->region));
	}

	return mem;
}