Lines Matching defs:robj

95  * dma_resv_init(robj)
101 dma_resv_init(struct dma_resv *robj)
104 ww_mutex_init(&robj->lock, &reservation_ww_class);
105 seqcount_init(&robj->seq);
106 robj->fence_excl = NULL;
107 robj->fence = NULL;
108 robj->robj_prealloc = NULL;
112 * dma_resv_fini(robj)
118 dma_resv_fini(struct dma_resv *robj)
122 if (robj->robj_prealloc) {
123 objlist_free(robj->robj_prealloc);
124 robj->robj_prealloc = NULL; /* paranoia */
126 if (robj->fence) {
127 for (i = 0; i < robj->fence->shared_count; i++) {
128 dma_fence_put(robj->fence->shared[i]);
129 robj->fence->shared[i] = NULL; /* paranoia */
131 objlist_free(robj->fence);
132 robj->fence = NULL; /* paranoia */
134 if (robj->fence_excl) {
135 dma_fence_put(robj->fence_excl);
136 robj->fence_excl = NULL; /* paranoia */
138 ww_mutex_destroy(&robj->lock);
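
dma_resv_init() and dma_resv_fini() bracket the lifetime of a reservation object: init sets up the ww_mutex, the seqcount, and empty fence slots; fini drops whatever fences are still installed. A minimal lifecycle sketch, assuming the Linux-compat header and a hypothetical embedding structure my_obj (not from the source):

#include <linux/dma-resv.h>

struct my_obj {
        struct dma_resv resv;           /* fences guarding this buffer */
        /* ... driver-specific fields ... */
};

static void
my_obj_ctor(struct my_obj *obj)
{
        dma_resv_init(&obj->resv);      /* lock, seqcount, NULL fence slots */
}

static void
my_obj_dtor(struct my_obj *obj)
{
        dma_resv_fini(&obj->resv);      /* releases any remaining fences */
}
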
142 * dma_resv_lock(robj, ctx)
150 dma_resv_lock(struct dma_resv *robj,
154 return ww_mutex_lock(&robj->lock, ctx);
158 * dma_resv_lock_slow(robj, ctx)
166 dma_resv_lock_slow(struct dma_resv *robj,
170 ww_mutex_lock_slow(&robj->lock, ctx);
174 * dma_resv_lock_interruptible(robj, ctx)
182 dma_resv_lock_interruptible(struct dma_resv *robj,
186 return ww_mutex_lock_interruptible(&robj->lock, ctx);
190 * dma_resv_lock_slow_interruptible(robj, ctx)
199 dma_resv_lock_slow_interruptible(struct dma_resv *robj,
203 return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
207 * dma_resv_trylock(robj)
213 dma_resv_trylock(struct dma_resv *robj)
216 return ww_mutex_trylock(&robj->lock);
220 * dma_resv_locking_ctx(robj)
227 dma_resv_locking_ctx(struct dma_resv *robj)
230 return ww_mutex_locking_ctx(&robj->lock);
234 * dma_resv_unlock(robj)
239 dma_resv_unlock(struct dma_resv *robj)
242 return ww_mutex_unlock(&robj->lock);
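
All of the lock operations forward to the ww_mutex underneath, so the usual wound/wait discipline applies: if dma_resv_lock() fails with -EDEADLK while the context already holds other reservation locks, the caller drops those and reacquires the contended object with the _slow variant. A hedged sketch of that backoff pattern for one object; the ww_acquire_* helpers follow the Linux API, and obj and the other held locks are illustrative:

struct ww_acquire_ctx ctx;
int ret;

ww_acquire_init(&ctx, &reservation_ww_class);
ret = dma_resv_lock(&obj->resv, &ctx);
if (ret == -EDEADLK) {
        /*
         * Wounded: drop any other reservation locks held under ctx
         * (elided here), then take this one via the slow path.
         */
        dma_resv_lock_slow(&obj->resv, &ctx);
}
ww_acquire_done(&ctx);

/* ... install or inspect fences under the lock ... */

dma_resv_unlock(&obj->resv);
ww_acquire_fini(&ctx);
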
246 * dma_resv_is_locked(robj)
248 * True if robj is locked.
251 dma_resv_is_locked(struct dma_resv *robj)
254 return ww_mutex_is_locked(&robj->lock);
258 * dma_resv_held(robj)
260 * True if robj is locked.
263 dma_resv_held(struct dma_resv *robj)
266 return ww_mutex_is_locked(&robj->lock);
270 * dma_resv_assert_held(robj)
272 * Panic if robj is not held, in DIAGNOSTIC builds.
275 dma_resv_assert_held(struct dma_resv *robj)
278 KASSERT(dma_resv_held(robj));
282 * dma_resv_get_excl(robj)
285 * object robj.
287 * Caller must have robj locked.
290 dma_resv_get_excl(struct dma_resv *robj)
293 KASSERT(dma_resv_held(robj));
294 return robj->fence_excl;
298 * dma_resv_get_list(robj)
301 * object robj.
303 * Caller must have robj locked.
306 dma_resv_get_list(struct dma_resv *robj)
309 KASSERT(dma_resv_held(robj));
310 return robj->fence;
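
Both accessors return borrowed pointers into the object and therefore require the reservation lock to be held for as long as the result is used. A short sketch (obj is the hypothetical holder from the first example):

/* Caller holds dma_resv_lock() on &obj->resv. */
struct dma_resv_list *list = dma_resv_get_list(&obj->resv);
struct dma_fence *excl = dma_resv_get_excl(&obj->resv);
unsigned int i;

for (i = 0; list != NULL && i < list->shared_count; i++) {
        /*
         * list->shared[i] is borrowed; dma_fence_get() it before
         * using it after the lock is dropped.
         */
}
if (excl != NULL) {
        /* likewise borrowed: reference it before dropping the lock */
}
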
314 * dma_resv_reserve_shared(robj, num_fences)
316 * Reserve space in robj to add num_fences shared fences. To be
319 * Caller must have robj locked.
325 dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
330 KASSERT(dma_resv_held(robj));
332 list = robj->fence;
333 prealloc = robj->robj_prealloc;
363 objlist_free(robj->robj_prealloc);
364 robj->robj_prealloc = prealloc;
371 robj->robj_prealloc = prealloc;
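
dma_resv_reserve_shared() only makes room (the robj_prealloc slot above); the fence itself is installed later by dma_resv_add_shared_fence(), which then cannot fail. A sketch of the reservation step, assuming an errno-style int return as in the Linux API:

int ret;

/* With &obj->resv locked: make room for one more shared fence. */
ret = dma_resv_reserve_shared(&obj->resv, 1);
if (ret) {
        /* presumably -ENOMEM from allocating a larger fence list */
        dma_resv_unlock(&obj->resv);
        return ret;
}
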
382 * dma_resv_write_begin(robj, ticket)
384 * Begin an atomic batch of writes to robj, and initialize opaque
388 * Caller must have robj locked.
394 dma_resv_write_begin(struct dma_resv *robj,
398 KASSERT(dma_resv_held(robj));
400 write_seqcount_begin(&robj->seq);
404 * dma_resv_write_commit(robj, ticket)
406 * Commit an atomic batch of writes to robj begun with the call to
409 * Caller must have robj locked.
415 dma_resv_write_commit(struct dma_resv *robj,
419 KASSERT(dma_resv_held(robj));
421 write_seqcount_end(&robj->seq);
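
The write ticket wraps the object's seqcount so that lockless readers can tell the fence pointers changed underneath them. A sketch of the writer side, mirroring how the add_*_fence paths below use it (the write-ticket type name is assumed from the file's conventions):

/* With &obj->resv locked: publish a batch of fence-pointer updates. */
struct dma_resv_write_ticket wticket;

dma_resv_write_begin(&obj->resv, &wticket);     /* seqcount becomes odd */
/* ... atomic_store_relaxed() the new fence pointers ... */
dma_resv_write_commit(&obj->resv, &wticket);    /* seqcount even again */
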
429 * dma_resv_read_begin(robj, ticket)
436 dma_resv_read_begin(const struct dma_resv *robj,
440 ticket->version = read_seqcount_begin(&robj->seq);
444 * dma_resv_read_valid(robj, ticket)
451 dma_resv_read_valid(const struct dma_resv *robj,
455 return !read_seqcount_retry(&robj->seq, ticket->version);
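
Together these implement the optimistic-read side of the seqcount: take a snapshot, do the unlocked loads, then validate and retry if a writer intervened. A generic sketch of a reader body (the read-ticket type name is assumed from the file's conventions):

/* Inside an unlocked reader: */
struct dma_resv_read_ticket rticket;

retry:
        dma_resv_read_begin(&obj->resv, &rticket);
        /* ... unlocked, atomic loads of the fence pointers go here ... */
        if (!dma_resv_read_valid(&obj->resv, &rticket))
                goto retry;     /* a writer raced with us; take a new snapshot */
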
459 * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket)
462 * and length of the shared fence list of robj and return true, or
471 dma_resv_get_shared_reader(const struct dma_resv *robj,
483 list = atomic_load_consume(&robj->fence);
487 * We are done reading from robj and list. Validate our
491 if (!dma_resv_read_valid(robj, ticket))
505 * dma_resv_get_excl_reader(robj, fencep, ticket)
507 * Set *fencep to the exclusive fence of robj and return true, or
519 dma_resv_get_excl_reader(const struct dma_resv *robj,
529 fence = atomic_load_consume(&robj->fence_excl);
532 * The load of robj->fence_excl is atomic, but the caller may
537 if (!dma_resv_read_valid(robj, ticket))
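
These two readers bundle the unlocked load with the ticket validation and return false when the snapshot is stale, so their callers simply restart. A sketch of the pattern the RCU consumers later in this file follow (the helpers appear to be file-local, so this illustrates internal use rather than a driver-facing API, and the const qualifiers are assumptions):

struct dma_resv_read_ticket t;
const struct dma_resv_list *list;
struct dma_fence *excl;
unsigned shared_count;

rcu_read_lock();
top:
        dma_resv_read_begin(&obj->resv, &t);
        if (!dma_resv_get_shared_reader(&obj->resv, &list, &shared_count, &t))
                goto top;       /* shared list changed underneath us */
        if (!dma_resv_get_excl_reader(&obj->resv, &excl, &t))
                goto top;       /* exclusive fence changed underneath us */
        /*
         * ... use list/shared_count and excl; see the callers below
         * for how references to the fences are managed ...
         */
rcu_read_unlock();
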
558 * dma_resv_add_excl_fence(robj, fence)
560 * Empty and release all of robj's shared fences, and clear and
562 * reference to it and save it as robj's exclusive fence.
564 * Caller must have robj locked.
567 dma_resv_add_excl_fence(struct dma_resv *robj,
570 struct dma_fence *old_fence = robj->fence_excl;
571 struct dma_resv_list *old_list = robj->fence;
575 KASSERT(dma_resv_held(robj));
589 dma_resv_write_begin(robj, &ticket);
592 atomic_store_relaxed(&robj->fence_excl, fence);
597 dma_resv_write_commit(robj, &ticket);
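
Installing an exclusive fence supersedes everything: the new pointer is published inside a write ticket, and only afterwards are the old shared fences and the old exclusive fence released. A hedged caller sketch (fence and the surrounding submission code are illustrative):

/* With &obj->resv held, after submitting a write job that signals fence: */
dma_resv_add_excl_fence(&obj->resv, fence);     /* takes its own reference */
dma_fence_put(fence);   /* drop the submitter's reference if no longer needed */
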
616 * dma_resv_add_shared_fence(robj, fence)
618 * Acquire a reference to fence and add it to robj's shared list.
622 * Caller must have robj locked, and must have preceded with a
627 dma_resv_add_shared_fence(struct dma_resv *robj,
630 struct dma_resv_list *list = robj->fence;
631 struct dma_resv_list *prealloc = robj->robj_prealloc;
636 KASSERT(dma_resv_held(robj));
652 dma_resv_write_begin(robj, &ticket);
672 dma_resv_write_commit(robj, &ticket);
709 dma_resv_write_begin(robj, &ticket);
712 atomic_store_relaxed(&robj->fence, prealloc);
713 robj->robj_prealloc = NULL;
716 dma_resv_write_commit(robj, &ticket);
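
Tying the pieces together, the usual read-job flow is reserve, submit, add, unlock; the add itself cannot fail because the space was reserved up front. A hedged end-to-end sketch (a NULL acquire context is assumed to be accepted for a single-lock path, as in Linux):

int ret;

ret = dma_resv_lock(&obj->resv, NULL);
if (ret)
        return ret;
ret = dma_resv_reserve_shared(&obj->resv, 1);   /* make room first */
if (ret) {
        dma_resv_unlock(&obj->resv);
        return ret;
}
/* ... submit the read job, obtaining a referenced fence ... */
dma_resv_add_shared_fence(&obj->resv, fence);   /* cannot fail now */
dma_resv_unlock(&obj->resv);
dma_fence_put(fence);           /* the list holds its own reference */
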
735 * dma_resv_get_excl_rcu(robj)
740 dma_resv_get_excl_rcu(const struct dma_resv *robj)
745 fence = dma_fence_get_rcu_safe(&robj->fence_excl);
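
Unlike dma_resv_get_excl(), this variant needs no reservation lock: dma_fence_get_rcu_safe() loops until it has a stable, referenced pointer. A brief caller sketch (the explicit RCU read section is harmless even if the helper also takes one internally, since they nest):

struct dma_fence *excl;

rcu_read_lock();
excl = dma_resv_get_excl_rcu(&obj->resv);       /* NULL or a referenced fence */
rcu_read_unlock();

if (excl != NULL) {
        /* ... wait on or inspect excl ... */
        dma_fence_put(excl);
}
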
752 * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
754 * Get a snapshot of the exclusive and shared fences of robj. The
765 dma_resv_get_fences_rcu(const struct dma_resv *robj,
778 dma_resv_read_begin(robj, &ticket);
781 if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket))
845 if (!dma_resv_read_valid(robj, &ticket))
851 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
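
This is the lockless snapshot for consumers that need the exclusive fence plus a private copy of the shared list. A hedged caller sketch following the parameter order above (the errno-style return and the kfree() of the copied array are assumptions carried over from the Linux API):

struct dma_fence *excl = NULL, **shared = NULL;
unsigned nshared = 0, i;
int ret;

ret = dma_resv_get_fences_rcu(&obj->resv, &excl, &nshared, &shared);
if (ret)
        return ret;             /* presumably -ENOMEM while copying the list */

for (i = 0; i < nshared; i++)
        dma_fence_put(shared[i]);       /* each entry was referenced for us */
kfree(shared);
if (excl != NULL)
        dma_fence_put(excl);
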
1033 * dma_resv_test_signaled_rcu(robj, shared)
1044 dma_resv_test_signaled_rcu(const struct dma_resv *robj,
1057 dma_resv_read_begin(robj, &ticket);
1061 if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
1088 if (!dma_resv_read_valid(robj, &ticket))
1096 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
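
The test variant polls without blocking: with shared=true it requires every fence (shared and exclusive) to be signalled, otherwise only the exclusive one. A minimal caller sketch:

/* Non-blocking: has all pending work on the buffer completed? */
if (dma_resv_test_signaled_rcu(&obj->resv, /*shared*/true)) {
        /* safe to recycle the buffer's backing pages */
}
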
1118 * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
1131 dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
1141 return dma_resv_test_signaled_rcu(robj, shared);
1147 dma_resv_read_begin(robj, &ticket);
1151 if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
1177 if (!dma_resv_read_valid(robj, &ticket))
1185 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
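
The waiting variant blocks until the selected fences signal, with a timeout of zero falling back to the non-blocking test above. A hedged sketch assuming the Linux return convention (remaining time on success, 0 on timeout, negative errno on error or interrupt) and jiffies-style timeout units in the compat layer; timeout_ticks is hypothetical:

long ret;

/* Wait interruptibly for all fences, shared and exclusive. */
ret = dma_resv_wait_timeout_rcu(&obj->resv, /*shared*/true, /*intr*/true,
    timeout_ticks /* hypothetical timeout, in the layer's tick units */);
if (ret < 0)
        return ret;             /* e.g. interrupted by a signal */
if (ret == 0)
        return -ETIMEDOUT;      /* nothing signalled in time */
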
1274 * dma_resv_do_poll(robj, events, rpoll)
1287 dma_resv_do_poll(const struct dma_resv *robj, int events,
1313 dma_resv_read_begin(robj, &ticket);
1317 if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
1390 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
1468 * dma_resv_kqfilter(robj, kn, rpoll)
1476 dma_resv_kqfilter(const struct dma_resv *robj,