/src/sys/external/bsd/common/linux/ |
linux_rcu.c |
    49 SDT_PROBE_DEFINE0(sdt, linux, rcu, synchronize__start);
    50 SDT_PROBE_DEFINE1(sdt, linux, rcu, synchronize__cpu, "unsigned"/*cpu*/);
    51 SDT_PROBE_DEFINE0(sdt, linux, rcu, synchronize__done);
    52 SDT_PROBE_DEFINE0(sdt, linux, rcu, barrier__start);
    53 SDT_PROBE_DEFINE0(sdt, linux, rcu, barrier__done);
    54 SDT_PROBE_DEFINE2(sdt, linux, rcu, call__queue,
    56 SDT_PROBE_DEFINE2(sdt, linux, rcu, call__run,
    58 SDT_PROBE_DEFINE2(sdt, linux, rcu, call__done,
    60 SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__queue,
    62 SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__free [all...]
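The probes above mark the main entry points of the Linux-compatibility RCU layer: synchronize_rcu() (synchronize__start/__cpu/__done), rcu_barrier() (barrier__start/__done), call_rcu() callbacks as they are queued, run, and retired (call__queue/__run/__done), and kfree_rcu() requests (kfree__queue/__free). As a hedged illustration only (the demo_obj names below are invented, not code from linux_rcu.c, and the probe mapping in the comments is the intended correspondence rather than anything verified against that file), a caller exercising these paths looks roughly like:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/rcupdate.h>	/* call_rcu(), synchronize_rcu(), rcu_barrier() */
    #include <linux/slab.h>

    /* Hypothetical object retired through an RCU callback. */
    struct demo_obj {
        int value;
        struct rcu_head rcu;		/* embedded callback head */
    };

    /* Runs after a grace period: would hit call__run and call__done. */
    static void demo_obj_free_cb(struct rcu_head *head)
    {
        struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

        kfree(obj);
    }

    static void demo_obj_retire(struct demo_obj *obj)
    {
        /* Queues the callback: would hit call__queue. */
        call_rcu(&obj->rcu, demo_obj_free_cb);
    }

    static void demo_flush(void)
    {
        synchronize_rcu();	/* synchronize__start .. synchronize__done */
        rcu_barrier();		/* barrier__start .. barrier__done */
    }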
/src/sys/external/bsd/drm2/dist/drm/i915/ |
i915_globals.c |
    31 struct rcu_head rcu; member in struct:park_work
    50 static void __i915_globals_grace(struct rcu_head *rcu)
    60 init_rcu_head(&park.rcu);
    61 call_rcu(&park.rcu, __i915_globals_grace);
    67 destroy_rcu_head(&park.rcu);
    128 * after a RCU grace period has completed with no activity. This
    132 * for an RCU grace period to elapse since the last use, it is likely
    138 /* Queue cleanup after the next RCU grace period has freed slabs */
|
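i915_globals.c queues a callback on an rcu_head embedded in its static park_work record and brackets its use with init_rcu_head()/destroy_rcu_head(), which tells the RCU debug-objects machinery about a head that was not obtained from kmalloc(). A minimal sketch of that pattern, with invented names (grace_waiter) and a plain completion standing in for the real callback, which actually reschedules cleanup work:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/rcupdate.h>	/* call_rcu(), init_rcu_head(), destroy_rcu_head() */
    #include <linux/completion.h>

    /* Hypothetical record embedding an rcu_head that is not individually
     * kmalloc'd (it is static here), so RCU debug objects are told about
     * its lifetime explicitly. */
    static struct grace_waiter {
        struct rcu_head rcu;
        struct completion done;
    } waiter;

    static void grace_elapsed(struct rcu_head *rcu)
    {
        struct grace_waiter *w = container_of(rcu, struct grace_waiter, rcu);

        complete(&w->done);	/* at least one grace period has passed */
    }

    /* Wait for a grace period via call_rcu(); not safe to run concurrently.
     * (synchronize_rcu() does this directly; the point of the callback in
     * i915_globals.c is to do extra cleanup after the grace period.) */
    static void wait_for_grace(void)
    {
        init_completion(&waiter.done);
        init_rcu_head(&waiter.rcu);
        call_rcu(&waiter.rcu, grace_elapsed);
        wait_for_completion(&waiter.done);
        destroy_rcu_head(&waiter.rcu);
    }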
i915_perf_types.h | 62 struct rcu_head rcu; member in struct:i915_oa_config
|
i915_sw_fence.c |
    467 struct rcu_head rcu; member in struct:i915_sw_dma_fence_cb_timer
    523 kfree_rcu(cb, rcu);
|
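Several of these drm hits (here and in intel_timeline.c, lib_sw_fence.c, intel_frontbuffer.c, amdgpu_amdkfd_fence.c, and vmwgfx_so.c below) use kfree_rcu() rather than call_rcu(): when the deferred work is exactly one kfree() of the enclosing object, kfree_rcu(ptr, field) avoids writing the callback by naming the embedded rcu_head field. A small sketch with an invented demo_cb type standing in for i915_sw_dma_fence_cb_timer:

    #include <linux/rcupdate.h>	/* kfree_rcu() */
    #include <linux/slab.h>

    /* Hypothetical callback record freed after a grace period. */
    struct demo_cb {
        void (*func)(void *);
        void *arg;
        struct rcu_head rcu;	/* names the field handed to kfree_rcu() */
    };

    static void demo_cb_retire(struct demo_cb *cb)
    {
        /*
         * Equivalent to call_rcu() with a callback whose only job is
         * kfree(cb), but without having to define that callback.
         */
        kfree_rcu(cb, rcu);
    }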
/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_timeline_types.h |
    65 * Contains an RCU guarded pointer to the last request. No reference is
    67 * the request using i915_active_fence_get(), or manage the RCU
    90 struct rcu_head rcu; member in struct:intel_timeline
    99 struct rcu_head rcu; member in struct:intel_timeline_cacheline
|
intel_gtt.c |
    219 container_of(work, struct i915_address_space, rcu.work);
    235 queue_rcu_work(vm->i915->wq, &vm->rcu);
    241 INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
|
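intel_gtt.c combines RCU with a workqueue: INIT_RCU_WORK() binds __i915_vm_release to the rcu_work embedded in i915_address_space, queue_rcu_work() runs it on a workqueue only after an RCU grace period, and the handler recovers its container through the nested rcu.work member. A generic sketch of the same mechanism, with invented demo_space names and system_wq standing in for the driver's private workqueue:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/workqueue.h>	/* struct rcu_work, INIT_RCU_WORK(), queue_rcu_work() */
    #include <linux/slab.h>

    /* Hypothetical object released on a workqueue after a grace period. */
    struct demo_space {
        struct rcu_work rcu;	/* work_struct plus rcu_head */
        /* ... mappings, locks, etc. ... */
    };

    static void demo_space_release(struct work_struct *work)
    {
        /* rcu.work is the work_struct embedded inside the rcu_work. */
        struct demo_space *s = container_of(work, struct demo_space, rcu.work);

        /* Heavyweight teardown is fine here: this runs in process context,
         * and at least one grace period has elapsed since queue_rcu_work(). */
        kfree(s);
    }

    static void demo_space_put(struct demo_space *s)
    {
        INIT_RCU_WORK(&s->rcu, demo_space_release);
        queue_rcu_work(system_wq, &s->rcu);
    }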
intel_timeline.c |
    138 kfree_rcu(cl, rcu);
    568 kfree_rcu(timeline, rcu);
|
intel_gtt.h | 251 struct rcu_work rcu; member in struct:i915_address_space
|
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_context_types.h |
    35 struct rcu_head rcu; member in struct:i915_gem_engines
    121 * @rcu: rcu_head for deferred freeing.
    123 struct rcu_head rcu; member in struct:i915_gem_context
|
i915_gem_object.c |
    81 init_rcu_head(&obj->rcu);
    177 container_of(head, typeof(*obj), rcu);
    269 /* But keep the pointer alive for RCU-protected lookups */
    270 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
    299 * Before we free the object, make sure any pure RCU-only
    308 * is delayed, first by RCU then by the workqueue, we want the
    318 * system, we can not do that directly from the RCU callback (which may
    320 * kthread. We use the RCU callback rather than move the freed object
|
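The comments quoted from i915_gem_object.c (lines 269-320) give the standard reason for deferring the free with call_rcu(): lockless lookups may still dereference the object after its last reference is dropped, so the memory must stay valid until those RCU read sections finish. A generic, hedged sketch of that lookup/free pairing (demo_obj and the fixed-size table are invented; i915 additionally bounces the actual free to a workqueue, as its comments explain):

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/rcupdate.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    /* Hypothetical object reachable through a lockless lookup table. */
    struct demo_obj {
        struct kref ref;
        struct rcu_head rcu;
    };

    static struct demo_obj __rcu *table[64];	/* published with rcu_assign_pointer() */

    /* Lockless lookup: the object may already have dropped its last
     * reference, but its memory stays valid until a grace period ends,
     * so kref_get_unless_zero() can still examine the refcount safely. */
    static struct demo_obj *demo_lookup(unsigned int idx)
    {
        struct demo_obj *obj;

        rcu_read_lock();
        obj = rcu_dereference(table[idx]);
        if (obj && !kref_get_unless_zero(&obj->ref))
            obj = NULL;		/* raced with the final put */
        rcu_read_unlock();

        return obj;
    }

    static void demo_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct demo_obj, rcu));
    }

    /* Final kref put; the table slot is assumed to be cleared already. */
    static void demo_release(struct kref *ref)
    {
        struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

        /* Keep the memory valid for concurrent demo_lookup() callers. */
        call_rcu(&obj->rcu, demo_free_rcu);
    }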
i915_gem_object_types.h | 133 struct rcu_head rcu; member in union:drm_i915_gem_object::__anonbfddbbad020a
|
i915_gem_context.c |
    268 static void free_engines_rcu(struct rcu_head *rcu)
    270 free_engines(container_of(rcu, struct i915_gem_engines, rcu));
    284 init_rcu_head(&e->rcu);
    329 kfree_rcu(ctx, rcu);
    1708 init_rcu_head(&set.engines->rcu);
    1767 call_rcu(&set.engines->rcu, free_engines_rcu);
    1782 init_rcu_head(&copy->rcu);
    2026 init_rcu_head(&clone->rcu);
|
/src/sys/external/bsd/drm2/dist/drm/scheduler/ |
sched_fence.c |
    99 * @rcu: RCU callback head
    101 * Free up the fence memory after the RCU grace period.
    103 static void drm_sched_fence_free(struct rcu_head *rcu)
    105 struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
    119 * It just RCU schedules freeing up the fence.
    126 call_rcu(&fence->finished.rcu, drm_sched_fence_free);
|
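drm_sched_fence_free() here and amdgpu_fence_free() further down both free a fence from the dma_fence_ops .release hook by handing the dma_fence's embedded rcu_head to call_rcu(), so fence pointers picked up under rcu_read_lock() remain valid until the grace period ends. A hedged sketch of such a driver-style release, with an invented demo_fence wrapper and the mandatory name callbacks omitted:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/dma-fence.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Hypothetical fence wrapper; drm_sched_fence and amdgpu_fence follow
     * the same shape with their own containers. */
    struct demo_fence {
        struct dma_fence base;	/* contains the rcu_head used below */
    };

    static void demo_fence_free(struct rcu_head *rcu)
    {
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

        kfree(container_of(f, struct demo_fence, base));
    }

    /* dma_fence_ops.release: called once the last reference is dropped. */
    static void demo_fence_release(struct dma_fence *f)
    {
        /* Defer the kfree() so RCU readers of the fence can finish. */
        call_rcu(&f->rcu, demo_fence_free);
    }

    static const struct dma_fence_ops demo_fence_ops = {
        .release = demo_fence_release,
        /* .get_driver_name / .get_timeline_name etc. omitted in this sketch */
    };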
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
lib_sw_fence.c |
    93 struct rcu_head rcu; member in union:heap_fence::__anon85865082010a
    133 kfree_rcu(h, rcu);
|
/src/sys/external/bsd/drm2/dist/drm/i915/display/ |
intel_frontbuffer.h | 50 struct rcu_head rcu; member in struct:intel_frontbuffer
|
intel_frontbuffer.c | 242 kfree_rcu(front, rcu);
|
/src/sys/external/gpl2/dts/dist/arch/mips/boot/dts/lantiq/ |
danube.dtsi |
    59 rcu0: rcu@203000 {
    60 compatible = "lantiq,rcu-xway";
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
amdgpu_amdkfd_fence.c |
    142 * Drops the mm_struct reference and RCU schedules freeing up the fence.
    155 kfree_rcu(f, rcu);
|
amdgpu_fence.c |
    665 * @rcu: RCU callback head
    667 * Free up the fence memory after the RCU grace period.
    669 static void amdgpu_fence_free(struct rcu_head *rcu)
    671 struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
    683 * It just RCU schedules freeing up the fence.
    687 call_rcu(&f->rcu, amdgpu_fence_free);
|
amdgpu_bo_list.c |
    46 static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
    48 struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
|
/src/sys/external/bsd/drm2/include/linux/ |
dma-fence.h | 58 struct rcu_head rcu; member in struct:dma_fence
|
/src/sys/external/bsd/drm2/dist/drm/vmwgfx/ |
vmwgfx_so.c |
    66 struct rcu_head rcu; member in struct:vmw_view
    296 kfree_rcu(view, rcu);
|
/src/sys/external/bsd/drm2/linux/ |
linux_dma_fence.c |
    251 dma_fence_free_cb(struct rcu_head *rcu)
    253 struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
    265 * any pending RCU read sections on all CPUs have completed.
    281 call_rcu(&fence->rcu, &dma_fence_free_cb);
|
linux_dma_resv.c |
    79 objlist_free_cb(struct rcu_head *rcu)
    81 struct dma_resv_list *list = container_of(rcu,
    510 * (b) the fence is scheduled to be destroyed after this RCU grace
    541 * If the fence is already scheduled to away after this RCU
    737 * Note: Caller need not call this from an RCU read section.
    776 /* Enter an RCU read section and get a read ticket. */
    808 * out of RCU and allocate one with
    924 /* Enter an RCU read section and get a read ticket. */
    973 /* All done with src; exit the RCU read section. */
    1055 /* Enter an RCU read section and get a read ticket. * [all...]
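The repeated "Enter an RCU read section and get a read ticket" comments describe the lockless-snapshot pattern this dma_resv emulation uses: a reader enters an RCU read section, samples a sequence number, copies what it needs, and retries if a writer raced with it. The NetBSD read-ticket API itself is not shown in these hits, so the sketch below illustrates the pattern with the generic Linux seqcount primitives and invented demo_resv types:

    #include <linux/rcupdate.h>
    #include <linux/seqlock.h>

    struct demo_list {
        unsigned int count;
    };

    /* Hypothetical RCU/seqcount-protected container standing in for a
     * dma_resv-style object: writers bump "seq" around updates and free
     * old lists via call_rcu(); readers retry on a racing update. */
    struct demo_resv {
        seqcount_t seq;
        struct demo_list __rcu *list;
    };

    static unsigned int demo_resv_count(struct demo_resv *resv)
    {
        struct demo_list *list;
        unsigned int seq, count;

        rcu_read_lock();				/* enter an RCU read section */
        do {
            seq = read_seqcount_begin(&resv->seq);	/* take a read ticket */
            list = rcu_dereference(resv->list);
            count = list ? list->count : 0;
        } while (read_seqcount_retry(&resv->seq, seq));	/* writer raced: retry */
        rcu_read_unlock();

        return count;
    }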
/src/lib/ |
Makefile |
    259 SUBDIR+= ../external/lgpl2/userspace-rcu/lib
    268 # libuv, userspace-rcu
|