/src/sys/external/bsd/compiler_rt/dist/lib/asan/ |
asan_stats.h | 32 uptr freed; member in struct:__asan::AsanStats
|
asan_stats.cc |
    47 Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
    48 Printf("Stats: %zuM really freed by %zu calls\n",
    143 uptr freed = stats.freed; local in function:__sanitizer_get_current_allocated_bytes
    144 // Return sane value if malloced < freed due to racy
    146 return (malloced > freed) ? malloced - freed : 1;
|
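The asan_stats.cc hit at line 146 is a defensive clamp: the malloced and freed counters are updated without a common lock, so a racy snapshot may see freed running ahead of malloced, and the query returns a small positive value rather than a wrapped-around unsigned difference. A minimal user-space sketch of the same guard, with hypothetical counter values:

    #include <stdio.h>

    /* Hypothetical counters; in the real allocator they are updated
     * concurrently by many threads. */
    static unsigned long malloced = 1024;
    static unsigned long freed    = 4096;   /* may race ahead of malloced */

    /* Clamp the difference so a racy snapshot never yields a huge
     * wrapped-around unsigned value; same idea as asan_stats.cc:146. */
    static unsigned long current_allocated_bytes(void)
    {
        return (malloced > freed) ? malloced - freed : 1;
    }

    int main(void)
    {
        printf("allocated: %lu bytes\n", current_allocated_bytes());
        return 0;
    }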
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_object.c |
    194 struct llist_node *freed)
    200 llist_for_each_entry_safe(obj, on, freed, freed) {
    277 struct llist_node *freed = llist_del_all(&i915->mm.free_list); local in function:i915_gem_flush_free_objects
    279 if (unlikely(freed))
    280 __i915_gem_free_objects(i915, freed);
    311 * freed objects.
    316 * Since we require blocking on struct_mutex to unbind the freed
    320 * kthread. We use the RCU callback rather than move the freed object
    325 if (llist_add(&obj->freed, &i915->mm.free_list)
    [all...] |
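The i915_gem_object.c matches outline a deferred-free scheme: objects are queued on a lock-free list with llist_add (line 325), detached in one shot with llist_del_all (line 277), and destroyed while walking the detached list (line 200). A simplified, single-threaded user-space sketch of the drain-and-walk idea, with an invented obj type and free_list in place of the kernel's llist primitives and RCU machinery:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for the kernel llist: an intrusive singly linked
     * stack of objects awaiting destruction. */
    struct obj {
        int id;
        struct obj *free_next;     /* plays the role of obj->freed */
    };

    static struct obj *free_list;  /* plays the role of i915->mm.free_list */

    /* Producer side: queue an object for deferred destruction
     * (llist_add in the real driver). */
    static void defer_free(struct obj *o)
    {
        o->free_next = free_list;
        free_list = o;
    }

    /* Consumer side: detach the whole list at once (llist_del_all),
     * then destroy each entry while walking the detached chain
     * (llist_for_each_entry_safe). */
    static void flush_free_objects(void)
    {
        struct obj *freed = free_list;
        free_list = NULL;
        while (freed != NULL) {
            struct obj *next = freed->free_next;
            printf("freeing obj %d\n", freed->id);
            free(freed);
            freed = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct obj *o = malloc(sizeof(*o));
            o->id = i;
            defer_free(o);
        }
        flush_free_objects();
        return 0;
    }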
i915_gem_shrinker.c |
    161 * and result in the object being freed from under us. This is
    184 * yet freed (due to RCU then a workqueue) we still want
    186 * the unbound/bound list until actually freed.
    259 unsigned long freed = 0; local in function:i915_gem_shrink_all
    262 freed = i915_gem_shrink(i915, -1UL, NULL,
    268 return freed;
    305 unsigned long freed; local in function:i915_gem_shrinker_scan
    309 freed = i915_gem_shrink(i915,
    318 freed += i915_gem_shrink(i915,
    328 return sc->nr_scanned ? freed : SHRINK_STOP
    [all...] |
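Line 328 shows the shrinker contract: the scan callback returns how much it reclaimed, or SHRINK_STOP when it could not scan anything, so the core stops asking it for more. A rough sketch of that shape, with a made-up reclaim_some() helper standing in for i915_gem_shrink() and a locally defined SHRINK_STOP:

    #include <stdio.h>

    #define SHRINK_STOP (~0UL)          /* sentinel: "no progress, stop" */

    struct shrink_control {
        unsigned long nr_to_scan;       /* how much the caller wants freed */
        unsigned long nr_scanned;       /* how much we actually examined */
    };

    /* Hypothetical reclaim helper standing in for i915_gem_shrink():
     * pretend we can free half of whatever is asked for. */
    static unsigned long reclaim_some(unsigned long want)
    {
        return want / 2;
    }

    /* Same shape as i915_gem_shrinker_scan(): accumulate freed pages
     * across passes and report SHRINK_STOP if nothing was scanned. */
    static unsigned long shrinker_scan(struct shrink_control *sc)
    {
        unsigned long freed;

        freed = reclaim_some(sc->nr_to_scan);
        sc->nr_scanned += sc->nr_to_scan;

        if (freed < sc->nr_to_scan)      /* try a second, harder pass */
            freed += reclaim_some(sc->nr_to_scan - freed);

        return sc->nr_scanned ? freed : SHRINK_STOP;
    }

    int main(void)
    {
        struct shrink_control sc = { .nr_to_scan = 128, .nr_scanned = 0 };
        unsigned long freed = shrinker_scan(&sc);

        if (freed == SHRINK_STOP)
            printf("no progress\n");
        else
            printf("freed %lu pages\n", freed);
        return 0;
    }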
i915_gem_object_types.h | 134 struct llist_node freed; member in union:drm_i915_gem_object::__anonbfddbbad020a
|
/src/usr.bin/rpcgen/ |
rpc_cout.c |
    673 int freed = 0; local in function:emit_single_in_line
    688 freed = 1;
    692 freed = 1;
    703 if (!freed)
|
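The rpc_cout.c hits show a plain ownership flag: freed is set on the branches that release a temporary, and the common exit path frees it only if no branch already did. One plausible user-space rendering of the pattern, with an invented emit() helper and type names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only: a temporary buffer may be released on some
     * branches; the flag makes the shared cleanup path free it
     * exactly once. */
    static void emit(const char *type)
    {
        char *tmp = strdup(type);
        int freed = 0;

        if (strcmp(type, "int") == 0) {
            printf("emit int decoder\n");
            free(tmp);
            freed = 1;
        } else if (strcmp(type, "long") == 0) {
            printf("emit long decoder\n");
            free(tmp);
            freed = 1;
        } else {
            printf("emit generic decoder for %s\n", tmp);
        }

        if (!freed)
            free(tmp);
    }

    int main(void)
    {
        emit("int");
        emit("double");
        return 0;
    }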
/src/sys/external/bsd/ipf/netinet/ |
ip_htable.c |
    219 /* used to delete the pools one by one to ensure they're properly freed up. */
    784 size_t freed; local in function:ipf_htable_flush
    787 freed = 0;
    793 freed++;
    801 return freed;
|
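ipf_htable_flush() simply counts what it deletes and returns the total so the caller can report it. A self-contained sketch of the same count-while-freeing loop over a toy linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        struct entry *next;
    };

    /* Same shape as ipf_htable_flush(): delete every entry and return
     * how many were freed. */
    static size_t flush_list(struct entry **head)
    {
        size_t freed = 0;

        while (*head != NULL) {
            struct entry *e = *head;
            *head = e->next;
            free(e);
            freed++;
        }
        return freed;
    }

    int main(void)
    {
        struct entry *head = NULL;
        for (int i = 0; i < 4; i++) {
            struct entry *e = malloc(sizeof(*e));
            e->next = head;
            head = e;
        }
        printf("flushed %zu entries\n", flush_list(&head));
        return 0;
    }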
fil.c |
    2022 /* Returns: int - 0 == packet ok, -1 == packet freed */
    2785 /* freed. Packets passed may be returned with the pointer pointed to by */
    3222 /* m_pullup() has freed the mbuf */
    3684 int freed = 0; local in function:ipf_flushlist
    3699 freed += ipf_group_flush(softc, fp->fr_grphead);
    3704 freed += ipf_group_flush(softc, fp->fr_icmpgrp);
    3715 freed++;
    3717 *nfreedp += freed;
    3718 return freed;
    5110 /* must use that as the guide for whether or not it can be freed. *
    [all...] |
/src/sys/kern/ |
sysv_sem.c |
    384 bool freed = false; local in function:semu_alloc
    392 freed = true;
    397 if (freed) {
|
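In semu_alloc() the freed flag records whether the reclaim pass actually released any unused undo structures; the allocation is retried only when it did. A small sketch of that reclaim-then-retry shape, using a hypothetical slot table:

    #include <stdbool.h>
    #include <stdio.h>

    #define NSLOTS 4

    /* Hypothetical slot table; in_use marks slots that still matter. */
    static struct slot {
        bool allocated;
        bool in_use;
    } slots[NSLOTS];

    /* Same idea as semu_alloc(): if no slot is free, reclaim the ones
     * that are allocated but unused, and retry only when something
     * was actually freed. */
    static int alloc_slot(void)
    {
        for (int pass = 0; pass < 2; pass++) {
            for (int i = 0; i < NSLOTS; i++) {
                if (!slots[i].allocated) {
                    slots[i].allocated = true;
                    return i;
                }
            }

            bool freed = false;
            for (int i = 0; i < NSLOTS; i++) {
                if (slots[i].allocated && !slots[i].in_use) {
                    slots[i].allocated = false;
                    freed = true;
                }
            }
            if (!freed)
                break;          /* nothing reclaimable: give up */
        }
        return -1;
    }

    int main(void)
    {
        for (int i = 0; i < NSLOTS; i++)
            slots[i] = (struct slot){ .allocated = true, .in_use = (i != 2) };
        printf("got slot %d\n", alloc_slot());
        return 0;
    }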
/src/sys/external/bsd/drm2/dist/drm/ttm/ |
ttm_page_alloc.c |
    31 * - Pool collects resently freed pages for reuse
    390 unsigned long freed = 0; local in function:ttm_pool_shrink_scan
    409 freed += (nr_free_pool - shrink_pages) << pool->order;
    410 if (freed >= sc->nr_to_scan)
    415 return freed;
    479 /* Failed pages have to be freed */
    1178 char *h[] = {"pool", "refills", "pages freed", "size"};
|
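ttm_pool_shrink_scan() accounts freed space in base pages, scaling each pool's count by its page order, and stops early once the caller's nr_to_scan target is met (lines 409-410). A toy sketch of that accounting, with invented pool contents:

    #include <stdio.h>

    /* Illustrative numbers only: each pool holds compound pages of
     * (1 << order) base pages. */
    struct pool {
        unsigned int order;
        unsigned long nr_free;   /* compound pages available to free */
    };

    static unsigned long shrink_pools(struct pool *pools, int npools,
        unsigned long nr_to_scan)
    {
        unsigned long freed = 0;

        for (int i = 0; i < npools; i++) {
            unsigned long take = pools[i].nr_free;

            pools[i].nr_free = 0;
            /* account in base pages, not compound pages */
            freed += take << pools[i].order;
            if (freed >= nr_to_scan)
                break;
        }
        return freed;
    }

    int main(void)
    {
        struct pool pools[] = { { 0, 10 }, { 2, 8 }, { 4, 8 } };
        printf("freed %lu pages\n", shrink_pools(pools, 3, 32));
        return 0;
    }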
ttm_page_alloc_dma.c |
    31 * - Pool collects resently freed pages for reuse (and hooks up to
    35 * when freed).
    543 * Albeit the pool might have already been freed earlier.
    1104 unsigned long freed = 0; local in function:ttm_dma_pool_shrink_scan
    1127 freed += nr_free - shrink_pages;
    1135 return freed;
    1228 seq_printf(m, " pool refills pages freed inuse available name\n");
|
/src/sys/external/bsd/drm2/dist/drm/virtio/ |
virtgpu_vq.c |
    182 int freed = 0; local in function:reclaim_vbufs
    186 freed++;
    188 if (freed == 0)
    1114 /* gets freed when the ring has consumed it */
|
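reclaim_vbufs() counts the buffers it pulls back from the virtqueue and lets the caller react when freed stays zero. An illustrative user-space version with a toy completion ring in place of the virtio machinery:

    #include <stdio.h>

    #define RING_SIZE 8

    /* Toy completion ring: entries the device has finished with. */
    static int used[RING_SIZE];
    static int used_head, used_tail;

    /* Same shape as reclaim_vbufs(): pop every completed buffer, count
     * them, and let the caller notice when nothing could be reclaimed. */
    static int reclaim_bufs(void)
    {
        int freed = 0;

        while (used_tail != used_head) {
            int id = used[used_tail];
            used_tail = (used_tail + 1) % RING_SIZE;
            printf("reclaimed buffer %d\n", id);
            freed++;
        }
        return freed;
    }

    int main(void)
    {
        used[used_head] = 7;
        used_head = (used_head + 1) % RING_SIZE;

        if (reclaim_bufs() == 0)
            printf("nothing to reclaim\n");
        return 0;
    }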
/src/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/ |
tsan_rtl_report.cc |
    601 bool freed = false; local in function:__tsan::ReportRace
    604 freed = s.GetFreedAndReset();
    623 if (thr->is_vptr_access && freed)
    627 else if (freed)
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
amdgpu_vm.h |
    272 /* BO mappings freed, but not yet updated in the PT */
    273 struct list_head freed; member in struct:amdgpu_vm
|
/src/sys/external/bsd/drm2/dist/drm/ |
drm_connector.c |
    183 struct llist_node *freed; local in function:drm_connector_free_work_fn
    186 freed = llist_del_all(&config->connector_free_list);
    189 llist_for_each_entry_safe(connector, n, freed, free_node) {
|
/src/sys/netinet/ |
ip_input.c |
    576 bool freed; local in function:ip_input
    578 freed = pfil_run_hooks(inet_pfil_hook, &m, ifp, PFIL_IN) != 0;
    579 if (freed || m == NULL) {
    639 * to be sent and the original packet to be freed).
    849 * => Returns true if packet has been forwarded/freed.
|
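The ip_input.c hit at line 579 is the usual packet-filter contract: the hook may consume (free) the mbuf, so the caller checks both the hook's return value and the possibly-NULLed pointer before touching the packet again. A hedged sketch with a hypothetical run_filter() hook standing in for pfil_run_hooks():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { int len; };

    /* Hypothetical filter hook: may drop (free) the packet, in which
     * case it reports nonzero and NULLs the caller's pointer,
     * mirroring the check at ip_input.c:579. */
    static int run_filter(struct pkt **pp)
    {
        if ((*pp)->len > 1500) {
            free(*pp);
            *pp = NULL;
            return 1;       /* packet consumed */
        }
        return 0;
    }

    static void input(struct pkt *m)
    {
        bool freed = run_filter(&m) != 0;

        if (freed || m == NULL)
            return;         /* never touch m again */

        printf("processing %d byte packet\n", m->len);
        free(m);
    }

    int main(void)
    {
        struct pkt *m = malloc(sizeof(*m));
        m->len = 9000;
        input(m);           /* filter frees it; input() must not */
        return 0;
    }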
/src/sys/arch/ia64/ia64/ |
pmap.c |
    699 int bit, field, freed, idx; local in function:pmap_pv_reclaim
    729 freed = 0;
    754 freed++;
    757 if (freed == 0) {
    761 /* Every freed mapping is for a 8 KB page. */
    762 pmap->pm_stats.resident_count -= freed;
    763 PV_STAT(pv_entry_frees += freed);
    764 PV_STAT(pv_entry_spare += freed);
    765 pv_entry_count -= freed;
    774 * One freed pv entry in locked_pmap i
    [all...] |
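pmap_pv_reclaim() counts the pv entries it frees, bails out if the count is zero, and otherwise applies the statistics adjustments in one batch (lines 761-765). A simplified sketch with hypothetical mapping and counter variables:

    #include <stdio.h>
    #include <stdbool.h>

    #define NMAPPINGS 16

    static bool mapped[NMAPPINGS];
    static bool referenced[NMAPPINGS];

    /* Hypothetical counters standing in for the pmap statistics that
     * pmap_pv_reclaim() batches up once it knows how many pv entries
     * it actually freed. */
    static long resident_count = NMAPPINGS;
    static long pv_entry_count = NMAPPINGS;
    static long pv_entry_frees, pv_entry_spare;

    static int reclaim(void)
    {
        int freed = 0;

        for (int i = 0; i < NMAPPINGS; i++) {
            if (mapped[i] && !referenced[i]) {
                mapped[i] = false;
                freed++;
            }
        }
        if (freed == 0)
            return 0;               /* nothing to account for */

        /* apply the per-call totals in one batch */
        resident_count -= freed;
        pv_entry_frees += freed;
        pv_entry_spare += freed;
        pv_entry_count -= freed;
        return freed;
    }

    int main(void)
    {
        for (int i = 0; i < NMAPPINGS; i++) {
            mapped[i] = true;
            referenced[i] = (i % 3 == 0);
        }
        printf("freed %d pv entries, %ld still resident\n",
            reclaim(), resident_count);
        return 0;
    }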
/src/sys/external/bsd/drm2/dist/drm/i915/display/ |
intel_display_types.h | 525 struct llist_node freed; member in struct:intel_atomic_state
|
intel_display.c |
    15261 struct llist_node *freed; local in function:intel_atomic_helper_free_state
    15263 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
    15264 llist_for_each_entry_safe(state, next, freed, freed)
    15524 if (llist_add(&state->freed, &helper->free_list))
|
/src/sys/external/bsd/drm2/dist/drm/radeon/ |
radeon.h |
    968 /* protecting invalidated and freed */
    974 /* BOs freed, but not yet updated in the PT */
    975 struct list_head freed; member in struct:radeon_vm
|