/xsrc/external/mit/MesaLib/dist/src/gallium/auxiliary/pipebuffer/
pb_slab.c
     38  /* Slabs with allocation candidates. Typically, slabs in this list should
     46   * Due to a race in new slab allocation, additional slabs in this list
     49  struct list_head slabs;    (member in struct pb_slab_group)
     54  pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)    (argument)
     64  struct pb_slab_group *group = &slabs->groups[entry->group_index];
     65  list_addtail(&slab->head, &group->slabs);
     70  slabs->slab_free(slabs->priv, slab);
     77  pb_slabs_reclaim_locked(struct pb_slabs *slabs)    (argument)
     81  LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs
    108  pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)    (argument)
    184  pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)    (argument)
    198  pb_slabs_reclaim(struct pb_slabs *slabs)    (argument)
    213  pb_slabs_init(struct pb_slabs *slabs, unsigned min_order, unsigned max_order,
                      unsigned num_heaps, bool allow_three_fourth_allocations,
                      void *priv, slab_can_reclaim_fn *can_reclaim,
                      slab_alloc_fn *slab_alloc, slab_free_fn *slab_free)    (argument)
    262  pb_slabs_deinit(struct pb_slabs *slabs)    (argument)
    [all ...]
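The pb_slabs_init() signature above fixes the group layout: one pb_slab_group per (heap, order) pair, with orders running from min_order to max_order, and pb_slab_alloc() later reaches the group through entry->group_index. Below is a minimal sketch of that routing; it illustrates the indexing scheme implied by these signatures, not the exact code in pb_slab.c, which additionally interleaves extra groups when allow_three_fourth_allocations is set.

    /* Sketch only: how a (size, heap) request could map onto the flat
     * groups[] array indexed by entry->group_index above. */
    static unsigned
    group_index_for(unsigned size, unsigned heap,
                    unsigned min_order, unsigned num_orders)
    {
       unsigned order = min_order;

       /* Round up to the nearest power of two, at least 2^min_order. */
       while ((1u << order) < size)
          order++;

       /* Heaps form the outer dimension, orders the inner one. */
       return heap * num_orders + (order - min_order);
    }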
pb_slab.h
     32   * from larger buffers (called "slabs").
    138  pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
    141  pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);
    144  pb_slabs_reclaim(struct pb_slabs *slabs);
    147  pb_slabs_init(struct pb_slabs *slabs,
    156  pb_slabs_deinit(struct pb_slabs *slabs);
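The header matches above cover the entire public surface of the helper: a driver initializes a pb_slabs with its callbacks, sub-allocates with pb_slab_alloc(), returns entries with pb_slab_free(), and tears down with pb_slabs_deinit(). A minimal wiring sketch follows; the callback shapes are assumptions inferred from the slab_can_reclaim_fn / slab_alloc_fn / slab_free_fn typedef names in pb_slabs_init(), and the my_* names are hypothetical.

    #include "pb_slab.h"

    /* Hypothetical driver state handed through the priv pointer. */
    struct my_winsys;

    /* Assumed callback shapes; see pb_slab.h for the real typedefs.
     * slab_alloc must create a backing buffer and populate the slab's
     * free-entry list; slab_free destroys it; can_reclaim reports
     * whether an entry is idle (e.g. no longer used by the GPU). */
    static struct pb_slab *my_slab_alloc(void *priv, unsigned heap,
                                         unsigned entry_size,
                                         unsigned group_index);
    static void my_slab_free(void *priv, struct pb_slab *slab);
    static bool my_can_reclaim(void *priv, struct pb_slab_entry *entry);

    static void
    example(struct my_winsys *ws, struct pb_slabs *slabs)
    {
       /* Entries from 2^8 to 2^20 bytes, one heap, no 3/4 sizes. */
       pb_slabs_init(slabs, 8, 20, 1, false, ws,
                     my_can_reclaim, my_slab_alloc, my_slab_free);

       struct pb_slab_entry *entry = pb_slab_alloc(slabs, 4096, 0);
       /* ... hand the sub-allocation out, and eventually: */
       pb_slab_free(slabs, entry);

       pb_slabs_deinit(slabs);
    }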
|
pb_bufmgr_slab.c
     94   * It adds/removes slabs as needed in order to meet the allocation/destruction
    119   * Partial slabs
    121   * Full slabs are not stored in any list. Empty slabs are destroyed
    124  struct list_head slabs;    (member in struct pb_slab_manager)
    131   * Wrapper around several slabs, therefore capable of handling buffers of
    208  list_addtail(&slab->head, &mgr->slabs);
    291   * Called when we ran out of free slabs.
    353  /* Add this slab to the list of partial slabs */
    354  list_addtail(&slab->head, &mgr->slabs);
    [all ...]
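The comments at lines 119-124 describe the manager's key invariant: mgr->slabs holds only partially filled slabs, full slabs sit on no list, and empty slabs are destroyed. A sketch of the release-side bookkeeping that would maintain this invariant, assuming hypothetical num_free / num_entries counters on the slab:

    /* Sketch of the invariant, not the pb_bufmgr_slab.c source. */
    static void
    example_release_buffer(struct pb_slab_manager *mgr, struct pb_slab *slab)
    {
       slab->num_free++;

       if (slab->num_free == 1)
          /* Slab was full and on no list; it is partial again. */
          list_addtail(&slab->head, &mgr->slabs);

       if (slab->num_free == slab->num_entries) {
          /* Slab is now empty: unlink and destroy it. */
          list_del(&slab->head);
          my_destroy_slab(mgr, slab);   /* hypothetical helper */
       }
    }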
/xsrc/external/mit/MesaLib.old/dist/src/gallium/auxiliary/pipebuffer/
pb_slab.c
     38  /* Slabs with allocation candidates. Typically, slabs in this list should
     46   * Due to a race in new slab allocation, additional slabs in this list
     49  struct list_head slabs;    (member in struct pb_slab_group)
     54  pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)    (argument)
     64  struct pb_slab_group *group = &slabs->groups[entry->group_index];
     65  LIST_ADDTAIL(&slab->head, &group->slabs);
     70  slabs->slab_free(slabs->priv, slab);
     75  pb_slabs_reclaim_locked(struct pb_slabs *slabs)    (argument)
     77  while (!LIST_IS_EMPTY(&slabs
     98  pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)    (argument)
    163  pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)    (argument)
    177  pb_slabs_reclaim(struct pb_slabs *slabs)    (argument)
    192  pb_slabs_init(struct pb_slabs *slabs, unsigned min_order, unsigned max_order,
                      unsigned num_heaps, void *priv, slab_can_reclaim_fn *can_reclaim,
                      slab_alloc_fn *slab_alloc, slab_free_fn *slab_free)    (argument)
    239  pb_slabs_deinit(struct pb_slabs *slabs)    (argument)
    [all ...]
pb_slab.h
     32   * from larger buffers (called "slabs").
    135  pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
    138  pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);
    141  pb_slabs_reclaim(struct pb_slabs *slabs);
    144  pb_slabs_init(struct pb_slabs *slabs,
    153  pb_slabs_deinit(struct pb_slabs *slabs);
|
pb_bufmgr_slab.c
     98   * It adds/removes slabs as needed in order to meet the allocation/destruction
    123   * Partial slabs
    125   * Full slabs are not stored in any list. Empty slabs are destroyed
    128  struct list_head slabs;    (member in struct pb_slab_manager)
    135   * Wrapper around several slabs, therefore capable of handling buffers of
    212  LIST_ADDTAIL(&slab->head, &mgr->slabs);
    296   * Called when we ran out of free slabs.
    358  /* Add this slab to the list of partial slabs */
    359  LIST_ADDTAIL(&slab->head, &mgr->slabs);
    [all ...]
/xsrc/external/mit/MesaLib.old/dist/src/gallium/state_trackers/nine/
nine_helpers.c
     37  pool->slabs = REALLOC(pool->slabs,
     42  pool->free = pool->slabs[pool->num_slabs++] = r;
|
nine_helpers.h
    175  struct nine_range **slabs;    (member in struct nine_range_pool)
|
/xsrc/external/mit/MesaLib/dist/src/gallium/frontends/nine/
nine_helpers.c
     37  pool->slabs = REALLOC(pool->slabs,
     42  pool->free = pool->slabs[pool->num_slabs++] = r;
|
nine_helpers.h
    175  struct nine_range **slabs;    (member in struct nine_range_pool)
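Both copies of nine_helpers.c show the same growth idiom: the pool keeps a growing array of slab pointers, so every slab can be freed at teardown, and the newest slab doubles as the head of the free list. A sketch of that pattern, assuming Mesa's REALLOC(ptr, old_size, new_size) and MALLOC helpers from u_memory.h and a hypothetical SLAB_SIZE:

    /* Sketch of the idiom at nine_helpers.c:37-42. */
    static void
    grow_pool(struct nine_range_pool *pool)
    {
       struct nine_range *r = MALLOC(SLAB_SIZE);

       /* Grow the bookkeeping array of slab pointers by one slot... */
       pool->slabs = REALLOC(pool->slabs,
                             pool->num_slabs * sizeof(*pool->slabs),
                             (pool->num_slabs + 1) * sizeof(*pool->slabs));

       /* ...record the slab for teardown and publish it as free head. */
       pool->free = pool->slabs[pool->num_slabs++] = r;
    }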
|
/xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/panfrost/
pan_screen.h
     89  /* Memory management is based on subdividing slabs with AMD's allocator */
     90  struct pb_slabs slabs;    (member in struct panfrost_screen)
|
pan_allocate.c
     46  struct pb_slab_entry *entry = pb_slab_alloc(&screen->slabs, size, heap_id);
     85  struct pb_slab_entry *entry = pb_slab_alloc(&screen->slabs, pool->entry_size, HEAP_TRANSIENT);
|
pan_resource.c
    654  pb_slabs_init(&pscreen->slabs,
|
pan_context.c
   2504  ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
|
/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/zink/
zink_bo.c
     87  struct pb_slabs *slabs = &bo_slabs[i];    (local in get_slabs)
     89  if (size <= 1ULL << (slabs->min_order + slabs->num_orders - 1))
     90  return slabs;
    534  //struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && screen->info.has_tmz_support) ?
    536  struct pb_slabs *slabs = screen->pb.bo_slabs;    (local in zink_bo_create)
    538  struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1];
    541  /* Sub-allocate small buffers from slabs. */
    551  /* Always use slabs for sizes less than 4 KB because the kernel aligns
    571  struct pb_slabs *slabs    (local in zink_bo_create)
    837  struct pb_slabs *slabs = screen->pb.bo_slabs;    (local in bo_slab_alloc)
    [all ...]
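zink, iris and amdgpu all carry the same get_slabs() helper: an array of NUM_SLAB_ALLOCATORS pb_slabs instances covers increasing size ranges, and a request is routed to the first allocator whose largest entry order (min_order + num_orders - 1) can hold it. A condensed sketch of that shared pattern:

    #include <assert.h>
    #include <stdint.h>

    /* Condensed form of the get_slabs() loops shown in the snippets. */
    static struct pb_slabs *
    get_slabs(struct pb_slabs *bo_slabs, uint64_t size)
    {
       for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
          struct pb_slabs *slabs = &bo_slabs[i];

          /* Largest entry this allocator hands out:
           * 2^(min_order + num_orders - 1). */
          if (size <= 1ULL << (slabs->min_order + slabs->num_orders - 1))
             return slabs;
       }

       assert(!"buffer size must fit one of the slab allocators");
       return NULL;
    }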
/xsrc/external/mit/MesaLib/dist/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c
    639  struct pb_slabs *slabs = &bo_slabs[i];    (local in get_slabs)
    641  if (size <= 1 << (slabs->min_order + slabs->num_orders - 1))
    642  return slabs;
    662  struct pb_slabs *slabs;    (local in amdgpu_bo_slab_destroy)
    666  slabs = get_slabs(ws, bo->base.size, bo->base.usage & RADEON_FLAG_ENCRYPTED);
    673  pb_slab_free(slabs, &bo->u.slab.entry);
    720  struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?    (local in amdgpu_bo_slab_alloc)
    725  unsigned max_entry_size = 1 << (slabs[i].min_order + slabs[
   1377  struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?    (local in amdgpu_bo_create)
   1413  struct pb_slabs *slabs = get_slabs(ws, alloc_size, flags);    (local in amdgpu_bo_create)
    [all ...]
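The matches at lines 720 and 1377 show amdgpu choosing between two sets of slab allocators depending on whether the buffer must live in TMZ-protected (encrypted) memory. The true branch of the conditional is cut off in the snippet, so the sketch below uses bo_slabs_encrypted as a hypothetical stand-in for that second array:

    #include <stdbool.h>

    /* Sketch of the TMZ routing at amdgpu_bo.c:720 and :1377. */
    static struct pb_slabs *
    pick_slab_set(struct amdgpu_winsys *ws, unsigned flags)
    {
       bool encrypted = (flags & RADEON_FLAG_ENCRYPTED) &&
                        ws->info.has_tmz_support;

       /* bo_slabs_encrypted is a hypothetical name; the snippet elides it. */
       return encrypted ? ws->bo_slabs_encrypted : ws->bo_slabs;
    }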
/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/iris/
iris_bufmgr.c
    549  struct pb_slabs *slabs = &bufmgr->bo_slabs[i];    (local in get_slabs)
    551  if (size <= 1ull << (slabs->min_order + slabs->num_orders - 1))
    552  return slabs;
    641  struct pb_slabs *slabs = bufmgr->bo_slabs;    (local in iris_slab_alloc)
    646  1 << (slabs[i].min_order + slabs[i].num_orders - 1);
    757  /* Always use slabs for sizes less than 4 KB because the kernel aligns
    778  struct pb_slabs *slabs = get_slabs(bufmgr, alloc_size);    (local in alloc_bo_from_slabs)
    779  entry = pb_slab_alloc(slabs, alloc_siz
    [all ...]
/xsrc/external/mit/MesaLib.old/dist/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c
    603  struct pb_slabs *slabs = &ws->bo_slabs[i];    (local in get_slabs)
    605  if (size <= 1 << (slabs->min_order + slabs->num_orders - 1))
    606  return slabs;
    643  struct pb_slabs *slabs = &ws->bo_slabs[i];    (local in amdgpu_bo_slab_alloc)
    644  unsigned max_entry_size = 1 << (slabs->min_order + slabs->num_orders - 1);
   1324  /* Sub-allocate small buffers from slabs. */
   1336  struct pb_slabs *slabs = get_slabs(ws, size);    (local in amdgpu_bo_create)
   1337  entry = pb_slab_alloc(slabs, siz
    [all ...]
/xsrc/external/mit/MesaLib.old/dist/src/amd/vulkan/
radv_shader.h
    330  struct list_head slabs;    (member in struct radv_shader_slab)
|
radv_shader.c
    385  list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
    420  list_add(&slab->slabs, &device->shader_slabs);
    432  list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
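radv strings its shader slabs onto an intrusive list: radv_shader_slab embeds a list_head named slabs (radv_shader.h line 330), new slabs are linked in with list_add(), and teardown walks with list_for_each_entry_safe() because it unlinks during iteration. A sketch of that teardown, with destroy_slab() as a hypothetical cleanup helper:

    /* Sketch of the teardown walk suggested by radv_shader.c:432. */
    static void
    destroy_all_shader_slabs(struct radv_device *device)
    {
       list_for_each_entry_safe(struct radv_shader_slab, slab,
                                &device->shader_slabs, slabs) {
          list_del(&slab->slabs);     /* unlink via the embedded list_head */
          destroy_slab(device, slab); /* hypothetical per-slab cleanup */
       }
    }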
|
/xsrc/external/mit/MesaLib/dist/docs/relnotes/
17.1.5.rst
    135  - winsys/radeon: only call pb_slabs_reclaim when slabs are actually
|
13.0.0.rst
    290  buffer allocation from slabs
|
20.1.0.rst
   3579  - radv: allocate larger shader memory slabs if needed
   4084  - gallium/pipebuffer: Use persistent maps for slabs
|
19.0.0.rst
   1747  - winsys/amdgpu: always reclaim/release slabs if there is not enough
|
21.1.0.rst
   3398  - winsys/amdgpu,radeonsi: add HUD counters for how much memory is wasted by slabs
   3400  - winsys/amdgpu,pb_slab: add slabs with 3/4 of power of two sizes to save memory
|