/src/sys/external/bsd/drm2/dist/drm/vmwgfx/ |
vmwgfx_reg.h | 46 u32 num_pages; member in struct:svga_guest_mem_descriptor
|
vmwgfx_mob.c |
     59   * @num_pages Number of pages that make up the page table.
     65  unsigned long num_pages;                             member in struct:vmw_mob
    421  mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
    448  ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
|
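The call at vmwgfx_mob.c:421 sizes the MOB page table, and line 448 then creates a backing buffer object of num_pages * PAGE_SIZE bytes. Below is a standalone sketch of how such a sizing routine can work; the 4 KiB page, the 64-bit PTE width, and the multi-level loop are illustrative assumptions, not the driver's exact formula:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE      4096UL
    #define PTES_PER_PAGE  (PAGE_SIZE / sizeof(uint64_t))   /* 512 entries */

    /* Hypothetical analogue of vmw_mob_calculate_pt_pages(): count the
     * page-table pages needed so every data page has a PTE, level by
     * level, until a single root page covers everything. */
    static unsigned long calculate_pt_pages(unsigned long data_pages)
    {
            unsigned long level_pages = data_pages;
            unsigned long total = 0;

            while (level_pages > 1) {
                    level_pages = (level_pages + PTES_PER_PAGE - 1) /
                                  PTES_PER_PAGE;
                    total += level_pages;
            }
            return total;
    }

    int main(void)
    {
            /* 4096 data pages (16 MiB): 8 leaf PT pages + 1 root = 9. */
            printf("%lu\n", calculate_pt_pages(4096));
            return 0;
    }
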
vmwgfx_page_dirty.c |
    240  pgoff_t num_pages = vbo->base.num_pages;             local in function:vmw_bo_dirty_add
    253  size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
    268  dirty->bitmap_size = num_pages;
    272  if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
    281  wp_shared_mapping_range(mapping, offset, num_pages);
    282  clean_record_shared_mapping_range(mapping, offset, num_pages,
    421  if (unlikely(page_offset >= bo->num_pages)) {
    464  if (page_offset >= bo->num_pages ||
|
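Line 253 sizes the dirty tracker with a trailing bitmap of one bit per page, and line 272 then picks the tracking method by comparing num_pages against PAGE_SIZE / sizeof(pte_t). A runnable sketch of the sizing arithmetic follows; struct bo_dirty is a trimmed, hypothetical stand-in for the driver's structure, and only the sizeof + BITS_TO_LONGS computation mirrors line 253:

    #include <stdlib.h>
    #include <limits.h>
    #include <stddef.h>

    #define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct bo_dirty {
            size_t bitmap_size;      /* pages tracked, cf. line 268 */
            unsigned long bitmap[];  /* one dirty bit per page */
    };

    static struct bo_dirty *dirty_alloc(size_t num_pages)
    {
            size_t size = sizeof(struct bo_dirty) +
                          BITS_TO_LONGS(num_pages) * sizeof(long);
            struct bo_dirty *dirty = calloc(1, size);

            if (dirty)
                    dirty->bitmap_size = num_pages;
            return dirty;
    }
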
/src/sys/external/bsd/drm2/dist/drm/ |
drm_memory.c |
     68  unsigned long i, num_pages =                         local in function:agp_remap
     94  /* note: use vmalloc() because num_pages could be large... */
     95  page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
    100  for (i = 0; i < num_pages; ++i)
    102  addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
|
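The excerpt shows the classic two-step remap: vmalloc() a page-pointer array (line 95), fill it, and hand it to vmap() (line 102) to get one contiguous kernel mapping. A kernel-style sketch of the pattern follows; where the pages come from is simplified here (taken as contiguous from first_page), whereas agp_remap() pulls them from the AGP memory's page list, and PAGE_AGP is the caching attribute the real code supplies:

    #include <linux/vmalloc.h>
    #include <linux/overflow.h>
    #include <linux/mm.h>

    static void *remap_pages(struct page *first_page, unsigned long num_pages)
    {
            struct page **page_map;
            unsigned long i;
            void *addr;

            /* vmalloc(), not kmalloc(): num_pages could be large. */
            page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
            if (!page_map)
                    return NULL;

            for (i = 0; i < num_pages; ++i)
                    page_map[i] = first_page + i;   /* simplification */

            addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
            vfree(page_map);  /* array only needed to build the mapping */
            return addr;
    }
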
/src/sys/external/bsd/drm2/dist/drm/xen/ |
xen_drm_front_gem.c |
     33  size_t num_pages;                                    member in struct:xen_gem_object
     52  xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
     53  xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
    107  ret = alloc_xenballooned_pages(xen_obj->num_pages,
    111  xen_obj->num_pages, ret);
    123  xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
    160  free_xenballooned_pages(xen_obj->num_pages,
    187  return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
    212  NULL, xen_obj->num_pages);
    258  ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
    [all...]
|
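Lines 52-53 show the standard sizing-and-allocation pair: round the byte size up to whole pages with DIV_ROUND_UP(), then size the page-pointer array with kvmalloc_array() so large arrays can fall back to vmalloc. A kernel-style sketch, with the struct trimmed to the two fields the excerpt shows:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct gem_pages {              /* trimmed stand-in for xen_gem_object */
            size_t num_pages;
            struct page **pages;
    };

    static int gem_pages_alloc(struct gem_pages *obj, size_t buf_size)
    {
            obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
            obj->pages = kvmalloc_array(obj->num_pages,
                                        sizeof(struct page *), GFP_KERNEL);
            return obj->pages ? 0 : -ENOMEM;
    }
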
/src/sys/external/bsd/drm2/dist/include/drm/ttm/ |
ttm_tt.h |
     96   * @num_pages: Number of pages in the page array.
    112  unsigned long num_pages;                             member in struct:ttm_tt
|
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_userptr.c |
    442  struct page **pvec, unsigned long num_pages)
    454  ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
    455  0, num_pages << PAGE_SHIFT,
    603  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;   local in function:i915_gem_userptr_get_pages
    648  pvec = kvmalloc_array(num_pages, sizeof(struct page *),
    654  num_pages,
    663  } else if (pinned < num_pages) {
    667  pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
|
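Line 603 derives num_pages with a plain shift rather than a rounding division; that is safe because GEM object sizes are page-aligned, so size >> PAGE_SHIFT and DIV_ROUND_UP(size, PAGE_SIZE) agree. A standalone demo of the equivalence, assuming 4 KiB pages:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            size_t size = 37 * PAGE_SIZE;   /* any page-aligned size */

            assert((size >> PAGE_SHIFT) ==
                   DIV_ROUND_UP(size, PAGE_SIZE));
            return 0;
    }

Lines 648-667 then use the same count to size the pinned-page vector and to detect a short pin (pinned < num_pages).
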
/src/sys/external/bsd/drm2/dist/drm/vgem/ |
vgem_drv.c |
     83  loff_t num_pages;                                    local in function:vgem_gem_fault
     87  num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
     89  if (page_offset >= num_pages)
|
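Lines 87-89 are a fault-handler bounds check: the faulting page offset is compared against the object's size in pages, computed with DIV_ROUND_UP(). A kernel-style sketch of just that check; the surrounding fault plumbing (looking up the page, inserting it into the VMA) is omitted:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static vm_fault_t fault_bounds_check(pgoff_t page_offset, size_t obj_size)
    {
            loff_t num_pages = DIV_ROUND_UP(obj_size, PAGE_SIZE);

            if (page_offset >= num_pages)
                    return VM_FAULT_SIGBUS;   /* fault past end of object */
            return 0;
    }
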
/src/sys/external/bsd/drm2/dist/drm/via/ |
via_dmablit.h | 51 unsigned long num_pages; member in struct:_drm_via_sg_info
|