/*	$NetBSD: nouveau_ttm.c,v 1.1.1.3 2018/08/27 01:34:55 riastradh Exp $	*/

/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_ttm.c,v 1.1.1.3 2018/08/27 01:34:55 riastradh Exp $");

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

#include <core/tegra.h>

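/*
 * TTM memory-type manager for the VRAM domain.  init/fini only cache a
 * pointer to the nvkm_fb subdev in man->priv; the real work is delegated
 * to the nvkm_ram allocator in the new/del hooks below.
 */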
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	man->priv = fb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

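/*
 * Tear down the (up to two) GPU virtual mappings attached to an nvkm_mem
 * node: unmap the page tables, then release the VM address range.
 */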
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	nvkm_mem_node_cleanup(mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}

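/*
 * Allocate VRAM for a buffer object.  NONCONTIG objects may be satisfied
 * with scattered chunks no smaller than the BO's page size, and bits 8..17
 * of the tile flags select the memory type.  -ENOSPC is translated into a
 * "no node" success so that TTM will evict something and retry.
 */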
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

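/*
 * VRAM manager vtable, bound to TTM_PL_VRAM when nouveau_bo.c initialises
 * the memory type.  No debug hook is provided, so that member stays NULL.
 */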
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
};

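/*
 * TTM memory-type manager for the GART domain on chips with a real MMU
 * (the NV50+ path; nouveau_bo.c picks the manager per chipset).  There is
 * no aperture to carve up at init time: addresses are assigned when the
 * buffer is mapped into a VM, so every node reports a start of 0.
 */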
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

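/*
 * Allocate the bookkeeping node for a GART placement.  The memory type
 * (storage/compression kind) is derived from the BO's tile flags with a
 * per-family mask; pre-Tesla chips have no kind bits at all.
 */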
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug
};

/*XXX*/
#include <subdev/mmu/nv04.h>
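/*
 * TTM memory-type manager for the GART domain on pre-NV50 hardware.  The
 * XXX above marks a layering violation: we reach directly into the nv04
 * MMU's private data to borrow its VM, holding a reference in man->priv.
 */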
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

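/*
 * Reserve a range of GART address space up front: unlike the NV50+ path,
 * nv04-class chips need the address at allocation time, so mem->start is
 * the VM offset handed back by nvkm_vm_get().
 */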
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug
};

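/*
 * Route an mmap() of the DRM device node: offsets below the TTM file-page
 * window belong to legacy DRM maps, everything above is a TTM buffer
 * object fault-mapped by ttm_bo_mmap().
 */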
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

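/*
 * Take references on the two TTM globals: memory accounting first, then
 * the BO subsystem, which depends on it.  A NULL release hook serves as
 * the "never initialised" marker, so it is cleared again on any failure.
 */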
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

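/*
 * Bring up TTM for the device: mirror any AGP state, pick a DMA mask
 * (capped at 32 bits for AGP and by the IOMMU bit on Tegra), initialise
 * the TTM device, then size and register the VRAM and GART domains.
 */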
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nvxx_device(&drm->device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

		/*
		 * If the platform can use an IOMMU, then the addressable
		 * DMA space is constrained by the IOMMU bit.
		 */
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);
	}

	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		bits = 32;
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	}
	if (ret)
		return ret;

	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

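/*
 * Tear down in the reverse order of nouveau_ttm_init(): release the VRAM
 * and GART domains, then the TTM device and globals, and finally drop the
 * write-combining mapping of the VRAM BAR.
 */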
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}
    447