
Lines Matching refs:drm (each match below carries its source line number; the nouveau_dmem_* function names indicate the file is nouveau_dmem.c, nouveau's device-private HMM memory helper)

58 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
65 struct nouveau_drm *drm;
78 struct nouveau_drm *drm;
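
The three matches above (source lines 58, 65 and 78) come from the file's private types: the copy-callback type and two structures that each keep a back-pointer to the owning nouveau_drm. Pulling the later matches together, the state looks roughly like the sketch below; everything beyond what the matched lines literally show (field order, the aperture parameter names, the chunk layout) is an assumption, not the verbatim source.

    /* Hedged reconstruction from the matched lines, not the real code. */
    typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                          enum nouveau_aper dst_aper, u64 dst_addr,
                                          enum nouveau_aper src_aper, u64 src_addr);

    struct nouveau_dmem_chunk {
            struct list_head list;       /* threaded on one of the lists below */
            struct nouveau_drm *drm;     /* source line 78, set at line 550 */
            struct nouveau_bo *bo;       /* assumed: VRAM backing, see line 235 */
    };

    struct nouveau_dmem {
            struct nouveau_drm *drm;     /* source line 65, set at line 510 */
            struct dev_pagemap pagemap;  /* lines 532-535 */
            struct {
                    nouveau_migrate_copy_t copy_func;  /* lines 156, 585 */
                    struct nouveau_channel *chan;      /* lines 424, 487 */
            } migrate;
            struct mutex mutex;          /* guards the three chunk lists */
            struct list_head chunk_free;
            struct list_head chunk_full;
            struct list_head chunk_empty;
    };
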
136 static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
140 struct device *dev = drm->dev->dev;
156 if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
173 struct nouveau_drm *drm = dmem->drm;
196 ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
203 dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
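
Source lines 136-203 trace the CPU fault path for a device-private page: before the CPU may touch the data, it has to be copied out of VRAM into a freshly allocated system page, via the channel copy callback with a HOST destination aperture. A minimal sketch of that flow, assuming the usual migrate_vma fault pattern for the elided lines (locals, error labels and the VRAM source address are guesses):

            struct device *dev = drm->dev->dev;                 /* line 140 */
            struct page *dpage;
            dma_addr_t dma_addr;
            u64 vram_addr = 0;  /* assumed: device offset of the faulting page */

            dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
            if (!dpage)
                    return VM_FAULT_SIGBUS;

            dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, dma_addr)) {
                    __free_page(dpage);
                    return VM_FAULT_SIGBUS;
            }

            /* line 156: blit one page, VRAM -> mapped host page */
            if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, dma_addr,
                                             NOUVEAU_APER_VRAM, vram_addr))
                    goto error_dma_unmap;
            /* success: dpage replaces the device page in the page table;
             * the caller unmaps at line 203 once the copy has been fenced */
            return 0;

    error_dma_unmap:
            dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
            __free_page(dpage);
            return VM_FAULT_SIGBUS;
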
215 nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
220 if (drm->dmem == NULL)
223 mutex_lock(&drm->dmem->mutex);
224 chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
228 mutex_unlock(&drm->dmem->mutex);
233 mutex_unlock(&drm->dmem->mutex);
235 ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
251 mutex_lock(&drm->dmem->mutex);
253 list_add(&chunk->list, &drm->dmem->chunk_empty);
255 list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
256 mutex_unlock(&drm->dmem->mutex);
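
nouveau_dmem_chunk_alloc (source lines 215-256) backs one DMEM chunk with a VRAM buffer object. The matches show the locking discipline clearly: drm->dmem->mutex is held only while the chunk is taken off chunk_empty and while it is put back; the nouveau_bo_new call in between can sleep, so it runs unlocked. Both adds at lines 253/255 target chunk_empty, which reads as: a successfully backed chunk goes to the head, where the page allocator will find it first, and a failed one to the tail. A condensed sketch under those assumptions (the nouveau_bo_new placement arguments and the success test are guesses):

            mutex_lock(&drm->dmem->mutex);                      /* line 223 */
            chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                             struct nouveau_dmem_chunk, list);
            if (chunk == NULL) {
                    mutex_unlock(&drm->dmem->mutex);            /* line 228 */
                    return -ENOMEM;
            }
            list_del(&chunk->list);    /* assumed: claim it before unlocking */
            mutex_unlock(&drm->dmem->mutex);                    /* line 233 */

            /* line 235: may sleep, hence unlocked; trailing args assumed */
            ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                                 TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &chunk->bo);

            mutex_lock(&drm->dmem->mutex);                      /* line 251 */
            if (ret == 0)
                    list_add(&chunk->list, &drm->dmem->chunk_empty);      /* 253 */
            else
                    list_add_tail(&chunk->list, &drm->dmem->chunk_empty); /* 255 */
            mutex_unlock(&drm->dmem->mutex);                    /* line 256 */
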
262 nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
266 chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
272 chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
282 nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
292 mutex_lock(&drm->dmem->mutex);
296 chunk = nouveau_dmem_chunk_first_free_locked(drm);
298 mutex_unlock(&drm->dmem->mutex);
299 ret = nouveau_dmem_chunk_alloc(drm);
305 mutex_lock(&drm->dmem->mutex);
322 mutex_unlock(&drm->dmem->mutex);
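
nouveau_dmem_pages_alloc (source lines 282-322) hands out page slots from the chunk pool. The helper at lines 262-272 prefers chunk_free, chunks with some pages still available, and falls back to chunk_empty; when both come up empty, pages_alloc drops the mutex, grows the pool through nouveau_dmem_chunk_alloc, and retries. A sketch of that loop shape (the per-chunk bitmap bookkeeping and error handling are assumed):

            mutex_lock(&drm->dmem->mutex);                      /* line 292 */
            while (npages) {
                    chunk = nouveau_dmem_chunk_first_free_locked(drm); /* 296 */
                    if (chunk == NULL) {
                            mutex_unlock(&drm->dmem->mutex);    /* line 298 */
                            ret = nouveau_dmem_chunk_alloc(drm); /* line 299 */
                            if (ret)
                                    return ret;  /* assumed error handling */
                            mutex_lock(&drm->dmem->mutex);      /* line 305 */
                            continue;
                    }
                    /* assumed: carve one pfn out of the chunk's free bitmap,
                     * moving the chunk to chunk_full once it is exhausted */
                    npages--;
            }
            mutex_unlock(&drm->dmem->mutex);                    /* line 322 */
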
328 nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
335 ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
346 nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
353 nouveau_dmem_resume(struct nouveau_drm *drm)
358 if (drm->dmem == NULL)
361 mutex_lock(&drm->dmem->mutex);
362 list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
367 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
372 mutex_unlock(&drm->dmem->mutex);
376 nouveau_dmem_suspend(struct nouveau_drm *drm)
380 if (drm->dmem == NULL)
383 mutex_lock(&drm->dmem->mutex);
384 list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
387 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
390 mutex_unlock(&drm->dmem->mutex);
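
nouveau_dmem_resume (source lines 353-372) and nouveau_dmem_suspend (376-390) mirror each other: both bail out if dmem was never set up, then walk chunk_free and chunk_full under the mutex. The loop bodies are elided by the match listing; since every populated chunk is backed by a nouveau_bo, the natural reading is that suspend releases the pinned VRAM backing and resume re-pins it, something like:

            /* resume side; the pin call and its flags are assumptions */
            mutex_lock(&drm->dmem->mutex);
            list_for_each_entry(chunk, &drm->dmem->chunk_free, list)
                    WARN_ON(nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false));
            list_for_each_entry(chunk, &drm->dmem->chunk_full, list)
                    WARN_ON(nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false));
            mutex_unlock(&drm->dmem->mutex);

chunk_empty is skipped on both paths, consistent with empty chunks either lacking a backing BO or holding nothing worth preserving across suspend.
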
394 nouveau_dmem_fini(struct nouveau_drm *drm)
398 if (drm->dmem == NULL)
401 mutex_lock(&drm->dmem->mutex);
403 WARN_ON(!list_empty(&drm->dmem->chunk_free));
404 WARN_ON(!list_empty(&drm->dmem->chunk_full));
406 list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
416 mutex_unlock(&drm->dmem->mutex);
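
nouveau_dmem_fini (source lines 394-416) tears the pool down. The WARN_ONs at lines 403-404 assert that no chunk still holds live device pages (chunk_free and chunk_full must both be empty by this point), after which chunk_empty is drained with the _safe iterator because each chunk is removed while walking. A sketch of the elided loop body (the BO release sequence is assumed):

            list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunk_empty, list) {
                    if (chunk->bo) {
                            nouveau_bo_unpin(chunk->bo);    /* assumed */
                            nouveau_bo_ref(NULL, &chunk->bo);
                    }
                    list_del(&chunk->list);
                    kfree(chunk);
            }
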
420 nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
424 struct nouveau_channel *chan = drm->dmem->migrate.chan;
479 nouveau_dmem_migrate_init(struct nouveau_drm *drm)
481 switch (drm->ttm.copy.oclass) {
486 drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
487 drm->dmem->migrate.chan = drm->ttm.chan;
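
nouveau_dmem_migrate_init (source lines 479-487) wires up the copy callback: it switches on drm->ttm.copy.oclass, the copy-engine class nouveau's TTM code already negotiated, and for the classes its blit routine understands it installs nvc0b5_migrate_copy (defined at line 420) and reuses TTM's copy channel. The case labels sit in the elided lines 482-485; a sketch with the class names as assumptions:

            switch (drm->ttm.copy.oclass) {                     /* line 481 */
            case PASCAL_DMA_COPY_A:   /* assumed labels, elided in listing */
            case PASCAL_DMA_COPY_B:
            case VOLTA_DMA_COPY_A:
                    drm->dmem->migrate.copy_func = nvc0b5_migrate_copy; /* 486 */
                    drm->dmem->migrate.chan = drm->ttm.chan;            /* 487 */
                    return 0;
            default:
                    break;
            }
            return -ENODEV;  /* assumed: no usable copy engine, no migration */
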
496 nouveau_dmem_init(struct nouveau_drm *drm)
498 struct device *device = drm->dev->dev;
504 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
507 if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
510 drm->dmem->drm = drm;
511 mutex_init(&drm->dmem->mutex);
512 INIT_LIST_HEAD(&drm->dmem->chunk_free);
513 INIT_LIST_HEAD(&drm->dmem->chunk_full);
514 INIT_LIST_HEAD(&drm->dmem->chunk_empty);
516 size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
519 ret = nouveau_dmem_migrate_init(drm);
532 drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
533 drm->dmem->pagemap.res = *res;
534 drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
535 if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
546 nouveau_dmem_fini(drm);
550 chunk->drm = drm;
552 list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
559 NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
562 mutex_destroy(&drm->dmem->mutex);
563 kfree(drm->dmem);
564 drm->dmem = NULL;
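
nouveau_dmem_init (source lines 496-564) is the setup path everything above depends on. Reading the matches in order: device-private memory is gated on Pascal or newer (line 504), the nouveau_dmem struct, its mutex and its three lists are initialized (507-514), the pool size is the usable VRAM rounded up to whole chunks (516), the copy engine is wired (519), and a MEMORY_DEVICE_PRIVATE dev_pagemap is registered through devm_memremap_pages so every covered VRAM page gets a struct page (532-535). The chunks spanning that range are created up front on chunk_empty (550-552), with their backing BOs allocated lazily by nouveau_dmem_chunk_alloc, and failure unwinds through mutex_destroy and kfree (562-564). A condensed sketch, eliding the resource bookkeeping the listing omits between lines 519 and 532:

            if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                    return;                                     /* line 504 */

            if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                    return;          /* assumed: init is best-effort */

            drm->dmem->drm = drm;                               /* line 510 */
            mutex_init(&drm->dmem->mutex);
            INIT_LIST_HEAD(&drm->dmem->chunk_free);
            INIT_LIST_HEAD(&drm->dmem->chunk_full);
            INIT_LIST_HEAD(&drm->dmem->chunk_empty);

            size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);

            if (nouveau_dmem_migrate_init(drm))                 /* line 519 */
                    goto out_free;   /* assumed label */

            drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;    /* line 532 */
            drm->dmem->pagemap.res = *res;  /* res: the reserved VRAM region,
                                               set up in the elided lines */
            drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
            if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
                    goto out_free;   /* assumed */

            /* chunk creation (lines 546-559) and the success return elided */

    out_free:
            mutex_destroy(&drm->dmem->mutex);                   /* line 562 */
            kfree(drm->dmem);
            drm->dmem = NULL;
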
567 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
570 struct device *dev = drm->dev->dev;
577 dpage = nouveau_dmem_page_alloc_locked(drm);
585 if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
595 nouveau_dmem_page_free_locked(drm, dpage);
600 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
607 args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
614 nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
619 dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
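
Source lines 567-619 are the opposite direction of the fault path: migrating anonymous system pages into VRAM. nouveau_dmem_migrate_copy_one takes a device page from the pool (line 577), DMA-maps the source page, and issues the copy with a VRAM destination aperture (585); on failure the device page goes back to the pool (595). nouveau_dmem_migrate_chunk batches those copies (607), emits a single fence on the migration channel (614), and only after the fence unmaps the accumulated DMA addresses (619), which is why migrate_vma (630-664) drives whole chunks at a time. A sketch of the per-page copy, with the device-address helper assumed:

            dpage = nouveau_dmem_page_alloc_locked(drm);        /* line 577 */
            if (!dpage)
                    return 0;

            *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *dma_addr))
                    goto out_free_page;

            /* line 585: blit one page, mapped host page -> VRAM; the helper
             * computing the destination offset is an assumption */
            if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
                                             nouveau_dmem_page_addr(dpage),
                                             NOUVEAU_APER_HOST, *dma_addr))
                    goto out_dma_unmap;

            return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

    out_dma_unmap:
            dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
    out_free_page:
            nouveau_dmem_page_free_locked(drm, dpage);          /* line 595 */
            return 0;
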
630 nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
664 nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
680 nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
682 return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
686 nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
704 if (!nouveau_dmem_page(drm, page)) {
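
The final two matches are the ownership test used by consumers. nouveau_dmem_page (source lines 680-682) decides whether a struct page belongs to this GPU: it must be a device-private page and its dev_pagemap must resolve back to this drm's dmem. The page_to_dmem helper is elided, but since the pagemap is embedded in struct nouveau_dmem, a container_of is the natural shape:

            /* assumed shape of the elided helper */
            static inline struct nouveau_dmem *page_to_dmem(struct page *page)
            {
                    return container_of(page->pgmap, struct nouveau_dmem,
                                        pagemap);
            }

nouveau_dmem_convert_pfn (686-704) uses the test while rewriting an hmm pfn array into device addresses: a page failing the check at line 704 is left untouched, since it is either ordinary system RAM or another device's private memory, and only pages owned by this GPU get translated to their VRAM offsets.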