/src/sys/external/bsd/drm2/dist/drm/vmwgfx/

vmwgfx_gmr.c
     45 unsigned long num_pages,
     53 uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
     54 uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
     64 define_cmd.numPages = num_pages;
     79 while (num_pages > 0) {
     80 unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
    100 num_pages -= nr;
    134 unsigned long num_pages,
    147 return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id)
    [all...]
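The arithmetic at lines 53 and 79-100 is a chunking pattern: line 53 takes the ceiling of num_pages / VMW_PPN_PER_REMAP to size the command buffer, and the loop then consumes the pages in batches of at most VMW_PPN_PER_REMAP. A minimal userland sketch of the same shape (the VMW_PPN_PER_REMAP value below is a placeholder, not the driver's real constant):

#include <stdint.h>
#include <stdio.h>

#define VMW_PPN_PER_REMAP 1024UL	/* placeholder value, for illustration only */

static unsigned long
min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int
main(void)
{
	unsigned long num_pages = 2500;
	/* Ceiling division: one extra command for a partial final batch. */
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
	    ((num_pages % VMW_PPN_PER_REMAP) > 0);

	printf("%lu pages -> %u remap commands\n", num_pages, remap_num);
	while (num_pages > 0) {
		unsigned long nr = min_ul(num_pages, VMW_PPN_PER_REMAP);
		printf("  batch of %lu pages\n", nr);	/* one remap command */
		num_pages -= nr;
	}
	return 0;
}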
vmwgfx_reg.h
     46 u32 num_pages;    (member in struct:svga_guest_mem_descriptor)

vmwgfx_gmrid_manager.c
     70 gman->used_gmr_pages += bo->num_pages;
     77 mem->num_pages = bo->num_pages;
     83 gman->used_gmr_pages -= bo->num_pages;
     98 gman->used_gmr_pages -= mem->num_pages

vmwgfx_ttm_buffer.c
    269 return ++(viter->i) < viter->num_pages;
    333 viter->num_pages = vsgt->num_pages;
    433 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
    444 vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
    450 (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
    451 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
    457 if (vsgt->num_pages > vmw_tt->sgt.nents) {
    459 sgl_size * (vsgt->num_pages
    [all...]

vmwgfx_page_dirty.c
    240 pgoff_t num_pages = vbo->base.num_pages;    (local in function:vmw_bo_dirty_add)
    253 size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
    268 dirty->bitmap_size = num_pages;
    272 if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
    281 wp_shared_mapping_range(mapping, offset, num_pages);
    282 clean_record_shared_mapping_range(mapping, offset, num_pages,
    421 if (unlikely(page_offset >= bo->num_pages)) {
    464 if (page_offset >= bo->num_pages ||
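The allocation at vmwgfx_page_dirty.c:253 sizes a tracking struct with a variable-length bitmap tail, one bit per page, which line 268 then records as bitmap_size. A self-contained sketch of that layout, with BITS_TO_LONGS reproduced for userland (the real struct carries more fields than shown here):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct dirty_tracker {
	unsigned long bitmap_size;	/* number of pages tracked */
	unsigned long bitmap[];		/* one bit per page, rounded up to longs */
};

static struct dirty_tracker *
dirty_tracker_alloc(unsigned long num_pages)
{
	size_t size = sizeof(struct dirty_tracker) +
	    BITS_TO_LONGS(num_pages) * sizeof(long);
	struct dirty_tracker *dirty = calloc(1, size);

	if (dirty != NULL)
		dirty->bitmap_size = num_pages;
	return dirty;
}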
/src/sys/external/bsd/drm2/dist/include/drm/

drm_cache.h
     42 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
/src/sys/external/bsd/drm2/dist/drm/

drm_cache.c
     66 unsigned long num_pages)
     71 for (i = 0; i < num_pages; i++)
     80 * @num_pages: Number of pages in the array.
     86 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
     91 drm_cache_flush_clflush(pages, num_pages);
    101 for (i = 0; i < num_pages; i++) {
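drm_clflush_pages() walks an array of pages and flushes the CPU caches covering them, with per-architecture fallbacks; the x86 path reduces to one clflush per cache line per page. A userland approximation with the SSE2 intrinsic (flat char buffers stand in for the kernel's struct page mapping, and the 4 KiB page and 64-byte line sizes are assumptions):

#include <emmintrin.h>	/* _mm_clflush, _mm_mfence (x86 SSE2) */

#define PAGE_SIZE	4096UL
#define CACHE_LINE	64UL

/* Flush every cache line of every page in the array. */
static void
clflush_pages(char *pages[], unsigned long num_pages)
{
	for (unsigned long i = 0; i < num_pages; i++)
		for (unsigned long off = 0; off < PAGE_SIZE; off += CACHE_LINE)
			_mm_clflush(pages[i] + off);
	_mm_mfence();	/* order the flushes against later accesses */
}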
drm_memory.c
     68 unsigned long i, num_pages =    (local in function:agp_remap)
     94 /* note: use vmalloc() because num_pages could be large... */
     95 page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
    100 for (i = 0; i < num_pages; ++i)
    102 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
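The comment at line 94 explains the vmalloc() choice; the array_size() wrapper at line 95 is the other defensive detail: it saturates to SIZE_MAX on multiplication overflow, so a huge num_pages makes the allocation fail rather than silently under-allocate page_map. A sketch of the same check using the GCC/Clang overflow builtin:

#include <stdlib.h>

/*
 * Overflow-checked array allocation, after the shape of
 * vmalloc(array_size(n, size)).  __builtin_mul_overflow stands in
 * for the kernel's array_size() helper.
 */
static void *
alloc_array_checked(size_t num_pages, size_t elem_size)
{
	size_t bytes;

	if (__builtin_mul_overflow(num_pages, elem_size, &bytes))
		return NULL;	/* array_size() would return SIZE_MAX here */
	return malloc(bytes);
}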
/src/sys/external/bsd/drm2/dist/drm/xen/

xen_drm_front_gem.c
     33 size_t num_pages;    (member in struct:xen_gem_object)
     52 xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
     53 xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
    107 ret = alloc_xenballooned_pages(xen_obj->num_pages,
    111 xen_obj->num_pages, ret);
    123 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
    160 free_xenballooned_pages(xen_obj->num_pages,
    187 return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
    212 NULL, xen_obj->num_pages);
    258 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
    [all...]
/src/sys/external/bsd/drm2/dist/drm/ttm/

ttm_tt.c
     94 ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
    124 ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
    130 ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
    141 ttm->ttm.num_pages << PAGE_SHIFT, ttm->ttm.num_pages, PAGE_SIZE, 0,
    144 ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
    205 drm_clflush_pages(ttm->pages, ttm->num_pages);
    207 for (i = 0; i < ttm->num_pages; ++i) {
    274 ttm->num_pages = bo->num_pages;
    [all...]
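Lines 124-130 show a single-allocation layout: one kvmalloc_array() call is sized to hold both the page-pointer array and the per-page DMA-address array, and dma_address is then pointed just past the end of pages. A simplified userland sketch of that layout (struct page and dma_addr_t reduced to placeholders):

#include <stdlib.h>

struct page;				/* opaque stand-in */
typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

struct tt {
	struct page **pages;
	dma_addr_t *dma_address;
	unsigned long num_pages;
};

static int
tt_alloc_page_directory(struct tt *ttm)
{
	/* One zeroed block sized for both arrays, like kvmalloc_array(). */
	ttm->pages = calloc(ttm->num_pages,
	    sizeof(*ttm->pages) + sizeof(*ttm->dma_address));
	if (ttm->pages == NULL)
		return -1;
	/* The DMA-address array starts where the pages array ends. */
	ttm->dma_address = (dma_addr_t *)(ttm->pages + ttm->num_pages);
	return 0;
}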
ttm_agp_backend.c
     65 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
     70 for (i = 0; i < ttm->num_pages; i++) {
ttm_page_alloc.c
   1050 ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
   1064 if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
   1067 ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
   1074 for (i = 0; i < ttm->num_pages; ++i) {
   1098 ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
   1112 for (i = 0; i < tt->ttm.num_pages; ++i) {
   1114 size_t num_pages = 1;    (local in function:ttm_populate_and_map_pages)
   1116 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
   1120 ++num_pages;
   1124 0, num_pages * PAGE_SIZE
   1151 size_t num_pages = 1;    (local in function:ttm_unmap_and_unpopulate_pages)
   [all...]
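In ttm_populate_and_map_pages() (lines 1112-1124), the inner loop counts how many of the following pages are physically adjacent so that one DMA mapping can cover the whole run instead of one mapping per page. The same coalescing logic over plain page-frame numbers (page_to_pfn() and the actual dma_map_page() call are modeled away):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Count how many pages starting at index i are physically consecutive. */
static size_t
run_length(const unsigned long pfns[], size_t n, size_t i)
{
	size_t num_pages = 1;

	for (size_t j = i + 1; j < n; ++j) {
		if (pfns[j] != pfns[j - 1] + 1)
			break;
		++num_pages;
	}
	return num_pages;
}

int
main(void)
{
	unsigned long pfns[] = { 10, 11, 12, 40, 41, 99 };
	size_t n = sizeof(pfns) / sizeof(pfns[0]);

	for (size_t i = 0; i < n; ) {
		size_t num_pages = run_length(pfns, n, i);
		/* One mapping covers the whole contiguous run. */
		printf("map pfn %lu, %zu bytes\n", pfns[i],
		    num_pages * PAGE_SIZE);
		i += num_pages;
	}
	return 0;
}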
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/

amdgpu_gtt_mgr.c
    216 r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
    248 atomic64_read(&mgr->available) < mem->num_pages) {
    252 atomic64_sub(mem->num_pages, &mgr->available);
    262 node->node.size = mem->num_pages;
    280 atomic64_add(mem->num_pages, &mgr->available);
    308 atomic64_add(mem->num_pages, &mgr->available);
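The GTT manager keeps a running count of available pages: an allocation checks and subtracts mem->num_pages up front (lines 248, 252), and the failure and free paths add the pages back (lines 280, 308). A reduced sketch with C11 atomics (the driver does the check-and-subtract under its own locking, omitted here):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_llong available = 1024 * 1024;	/* pool size in pages, arbitrary */

static bool
gtt_reserve(long long num_pages)
{
	if (atomic_load(&available) < num_pages)
		return false;			/* not enough GTT space */
	atomic_fetch_sub(&available, num_pages);
	return true;
}

static void
gtt_release(long long num_pages)
{
	atomic_fetch_add(&available, num_pages);	/* failure or free path */
}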
amdgpu_vram_mgr.c
    283 unsigned pages = mem->num_pages;
    313 if (start > mem->num_pages)
    314 start -= mem->num_pages;
    354 mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
    372 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
    387 pages_left = mem->num_pages;
    435 atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
    458 unsigned pages = mem->num_pages;
amdgpu_ttm.c
     71 struct ttm_mem_reg *mem, unsigned num_pages,
    458 new_mem->num_pages << PAGE_SHIFT,
    614 if (nodes->size != mem->num_pages)
    705 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
    722 mem->bus.size = mem->num_pages << PAGE_SHIFT;
    743 (mm_node->size == mem->num_pages))
    867 range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
    904 for (i = 0; i < ttm->num_pages; i++) {
    947 gtt->userptr, ttm->num_pages);
    979 for (i = 0; i < ttm->num_pages; ++i
   1614 unsigned long num_pages = bo->mem.num_pages;    (local in function:amdgpu_ttm_bo_eviction_valuable)
   2322 unsigned long num_pages;    (local in function:amdgpu_fill_buffer)
   [all...]
/src/sys/external/bsd/drm2/dist/drm/radeon/

radeon_ttm.c
    226 unsigned num_pages;    (local in function:radeon_move_blit)
    263 num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    264 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
    415 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
    426 mem->bus.size = mem->num_pages << PAGE_SHIFT;
    542 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
    567 .iov_len = ttm->num_pages << PAGE_SHIFT,
    573 .uio_resid = ttm->num_pages << PAGE_SHIFT,
    583 ttm->num_pages << PAGE_SHIFT
    620 unsigned num_pages = ttm->num_pages - pinned;    (local in function:radeon_ttm_tt_pin_userptr)
    [all...]
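Line 263 converts the blit size from CPU pages to GPU pages: radeon_copy() counts RADEON_GPU_PAGE_SIZE (4 KiB) units, so on a kernel configured with a larger PAGE_SIZE each CPU page expands to several GPU pages. Worked through with an illustrative 16 KiB CPU page:

#include <stdio.h>

#define PAGE_SIZE		16384UL	/* illustrative: a 16 KiB kernel page */
#define RADEON_GPU_PAGE_SIZE	4096UL

int
main(void)
{
	unsigned long cpu_pages = 100;
	/* Each CPU page holds PAGE_SIZE / RADEON_GPU_PAGE_SIZE GPU pages. */
	unsigned long gpu_pages = cpu_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	printf("%lu CPU pages = %lu GPU pages\n", cpu_pages, gpu_pages);	/* 400 */
	return 0;
}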
radeon_prime.c
     42 int npages = bo->tbo.num_pages;
     52 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
/src/sys/external/bsd/drm2/amdgpu/

amdgpufb.c
    169 const unsigned num_pages __diagused = rbo->tbo.num_pages;
    173 KASSERT(offset < ((uintmax_t)num_pages << PAGE_SHIFT));
/src/sys/external/bsd/drm2/nouveau/

nouveaufb.c
    175 const unsigned num_pages __diagused = nvbo->bo.num_pages;
    179 KASSERT(offset < (num_pages << PAGE_SHIFT));
/src/sys/external/bsd/drm2/radeon/

radeondrmkmsfb.c
    174 const unsigned num_pages __diagused = rbo->tbo.num_pages;
    176 KASSERT(offset < (num_pages << PAGE_SHIFT));
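Note the difference between these NetBSD framebuffer asserts: amdgpufb.c:173 widens num_pages to uintmax_t before shifting, while the nouveaufb.c and radeondrmkmsfb.c variants shift the plain unsigned value, so with 4 KiB pages the byte count can wrap once an object reaches 4 GiB. A small demonstration of why the cast has to come before the shift:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int
main(void)
{
	unsigned num_pages = 2u * 1024 * 1024;	/* 2M pages = 8 GiB */

	/* Shift done in unsigned (32 bits on common ABIs): 2^33 wraps to 0. */
	printf("%ju\n", (uintmax_t)(num_pages << PAGE_SHIFT));
	/* Widen first, then shift: 8589934592 as intended. */
	printf("%ju\n", (uintmax_t)num_pages << PAGE_SHIFT);
	return 0;
}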
/src/sys/external/bsd/drm2/ttm/

ttm_agp_backend.c
    109 KASSERT(ttm_agp->ttm_dma.dma_address->dm_nsegs == ttm->num_pages);
    110 for (i = 0; i < ttm->num_pages; i++) {
    146 for (i = 0; i < ttm->num_pages; i++)
ttm_bus_dma.c
     80 (ttm_dma->ttm.num_pages << PAGE_SHIFT), BUS_DMA_NOWAIT);
    100 const size_t size = (ttm_dma->ttm.num_pages << PAGE_SHIFT);
/src/sys/external/bsd/drm2/vmwgfx/

vmwgfxfb.c
    169 const unsigned num_pages __diagused = vbo->base.num_pages;
    171 KASSERT(offset < (num_pages << PAGE_SHIFT));
/src/sys/external/bsd/drm2/dist/drm/nouveau/

nouveau_prime.c
     38 int npages = nvbo->bo.num_pages;
     48 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
/src/sys/external/bsd/drm2/dist/include/drm/ttm/

ttm_memory.h
    100 uint64_t num_pages, struct ttm_operation_ctx *ctx);