
Lines Matching refs:rq

52 	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
112 struct i915_request *rq = to_request(fence);
122 i915_sw_fence_fini(&rq->submit);
123 i915_sw_fence_fini(&rq->semaphore);
126 kmem_cache_free(global.slab_requests, rq);
157 static void __notify_execute_cb(struct i915_request *rq)
161 lockdep_assert_held(&rq->lock);
163 if (list_empty(&rq->execute_cb))
166 list_for_each_entry(cb, &rq->execute_cb, link)
179 INIT_LIST_HEAD(&rq->execute_cb);
213 static void remove_from_engine(struct i915_request *rq)
219 * as their rq->engine pointer is not stable until under that
221 * check that the rq still belongs to the newly locked engine.
223 locked = READ_ONCE(rq->engine);
225 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
230 list_del_init(&rq->sched.link);
231 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
232 clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
236 bool i915_request_retire(struct i915_request *rq)
238 if (!i915_request_completed(rq))
241 RQ_TRACE(rq, "\n");
243 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
244 trace_i915_request_retire(rq);
255 GEM_BUG_ON(!list_is_first(&rq->link,
256 &i915_request_timeline(rq)->requests));
257 rq->ring->head = rq->postfix;
265 remove_from_engine(rq);
267 spin_lock_irq(&rq->lock);
268 i915_request_mark_complete(rq);
269 if (!i915_request_signaled(rq))
270 dma_fence_signal_locked(&rq->fence);
271 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
272 i915_request_cancel_breadcrumb(rq);
273 if (i915_request_has_waitboost(rq)) {
274 GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
275 atomic_dec(&rq->engine->gt->rps.num_waiters);
277 if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
278 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
279 __notify_execute_cb(rq);
281 GEM_BUG_ON(!list_empty(&rq->execute_cb));
282 spin_unlock_irq(&rq->lock);
284 remove_from_client(rq);
285 list_del(&rq->link);
287 intel_context_exit(rq->context);
288 intel_context_unpin(rq->context);
290 free_capture_list(rq);
291 i915_sched_node_fini(&rq->sched);
292 i915_request_put(rq);
297 void i915_request_retire_upto(struct i915_request *rq)
299 struct intel_timeline * const tl = i915_request_timeline(rq);
302 RQ_TRACE(rq, "\n");
304 GEM_BUG_ON(!i915_request_completed(rq));
308 } while (i915_request_retire(tmp) && tmp != rq);
312 __await_execution(struct i915_request *rq,
314 void (*hook)(struct i915_request *rq,
322 hook(rq, &signal->fence);
330 cb->fence = &rq->submit;
343 hook(rq, &signal->fence);
354 rq->sched.flags |= signal->sched.flags;
558 struct i915_request *rq, *rn;
560 list_for_each_entry_safe(rq, rn, &tl->requests, link)
561 if (!i915_request_retire(rq))
568 struct i915_request *rq;
577 rq = list_first_entry(&tl->requests, typeof(*rq), link);
578 i915_request_retire(rq);
580 rq = kmem_cache_alloc(global.slab_requests,
582 if (rq)
583 return rq;
586 rq = list_last_entry(&tl->requests, typeof(*rq), link);
587 cond_synchronize_rcu(rq->rcustate);
598 struct i915_request *rq = arg;
600 spin_lock_init(&rq->lock);
601 i915_sched_node_init(&rq->sched);
602 i915_sw_fence_init(&rq->submit, submit_notify);
603 i915_sw_fence_init(&rq->semaphore, semaphore_notify);
605 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
607 rq->file_priv = NULL;
608 rq->capture_list = NULL;
610 INIT_LIST_HEAD(&rq->execute_cb);
615 struct i915_request *rq = arg;
617 dma_fence_destroy(&rq->fence);
619 i915_sw_fence_fini(&rq->submit);
620 i915_sw_fence_fini(&rq->semaphore);
622 spin_lock_destroy(&rq->lock);
629 struct i915_request *rq;
667 rq = kmem_cache_alloc(global.slab_requests,
669 if (unlikely(!rq)) {
670 rq = request_alloc_slow(tl, gfp);
671 if (!rq) {
677 rq->i915 = ce->engine->i915;
678 rq->context = ce;
679 rq->engine = ce->engine;
680 rq->ring = ce->ring;
681 rq->execution_mask = ce->engine->mask;
684 dma_fence_reset(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
686 kref_init(&rq->fence.refcount);
687 rq->fence.flags = 0;
688 rq->fence.error = 0;
689 INIT_LIST_HEAD(&rq->fence.cb_list);
692 ret = intel_timeline_get_seqno(tl, rq, &seqno);
696 rq->fence.context = tl->fence_context;
697 rq->fence.seqno = seqno;
699 RCU_INIT_POINTER(rq->timeline, tl);
700 RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
701 rq->hwsp_seqno = tl->hwsp_seqno;
703 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
706 i915_sw_fence_reinit(&i915_request_get(rq)->submit);
707 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
709 i915_sched_node_reinit(&rq->sched);
712 rq->batch = NULL;
713 GEM_BUG_ON(rq->file_priv);
714 GEM_BUG_ON(rq->capture_list);
715 GEM_BUG_ON(!list_empty(&rq->execute_cb));
729 rq->reserved_space =
730 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
738 rq->head = rq->ring->emit;
740 ret = rq->engine->request_alloc(rq);
744 rq->infix = rq->ring->emit; /* end of header; start of user payload */
747 return rq;
750 ce->ring->emit = rq->head;
753 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
754 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
757 kmem_cache_free(global.slab_requests, rq);
766 struct i915_request *rq;
774 rq = list_first_entry(&tl->requests, typeof(*rq), link);
775 if (!list_is_last(&rq->link, &tl->requests))
776 i915_request_retire(rq);
779 rq = __i915_request_create(ce, GFP_KERNEL);
781 if (IS_ERR(rq))
785 rq->cookie = lockdep_pin_lock(&tl->mutex);
787 return rq;
791 return rq;
795 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
800 GEM_BUG_ON(i915_request_timeline(rq) ==
830 if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
831 err = i915_sw_fence_await_dma_fence(&rq->submit,
840 already_busywaiting(struct i915_request *rq)
854 return rq->sched.semaphores | rq->engine->saturated;
978 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1003 i915_sw_fence_set_error_once(&rq->submit, fence->error);
1012 if (fence->context == rq->fence.context)
1017 intel_timeline_sync_is_later(i915_request_timeline(rq),
1022 ret = i915_request_await_request(rq, to_request(fence));
1024 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
1032 intel_timeline_sync_set(i915_request_timeline(rq),
1056 void (*hook)(struct i915_request *rq,
1091 i915_request_await_execution(struct i915_request *rq,
1093 void (*hook)(struct i915_request *rq,
1113 i915_sw_fence_set_error_once(&rq->submit, fence->error);
1123 ret = __i915_request_await_execution(rq,
1127 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
1199 void i915_request_skip(struct i915_request *rq, int error)
1201 void *vaddr = rq->ring->vaddr;
1205 dma_fence_set_error(&rq->fence, error);
1207 if (rq->infix == rq->postfix)
1215 head = rq->infix;
1216 if (rq->postfix < head) {
1217 memset(vaddr + head, 0, rq->ring->size - head);
1220 memset(vaddr + head, 0, rq->postfix - head);
1221 rq->infix = rq->postfix;
1225 __i915_request_add_to_timeline(struct i915_request *rq)
1227 struct intel_timeline *timeline = i915_request_timeline(rq);
1251 &rq->fence));
1253 if (is_power_of_2(prev->engine->mask | rq->engine->mask))
1254 i915_sw_fence_await_sw_fence(&rq->submit,
1256 &rq->submitq);
1258 __i915_sw_fence_await_dma_fence(&rq->submit,
1260 &rq->dmaq);
1261 if (rq->engine->schedule)
1262 __i915_sched_node_add_dependency(&rq->sched,
1264 &rq->dep,
1268 list_add_tail(&rq->link, &timeline->requests);
1275 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1285 struct i915_request *__i915_request_commit(struct i915_request *rq)
1287 struct intel_engine_cs *engine = rq->engine;
1288 struct intel_ring *ring = rq->ring;
1291 RQ_TRACE(rq, "\n");
1298 GEM_BUG_ON(rq->reserved_space > ring->space);
1299 rq->reserved_space = 0;
1300 rq->emitted_jiffies = jiffies;
1308 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1310 rq->postfix = intel_ring_offset(rq, cs);
1312 return __i915_request_add_to_timeline(rq);
1315 void __i915_request_queue(struct i915_request *rq,
1329 i915_sw_fence_commit(&rq->semaphore);
1330 if (attr && rq->engine->schedule)
1331 rq->engine->schedule(rq, attr);
1332 i915_sw_fence_commit(&rq->submit);
1335 void i915_request_add(struct i915_request *rq)
1337 struct intel_timeline * const tl = i915_request_timeline(rq);
1342 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1344 trace_i915_request_add(rq);
1346 prev = __i915_request_commit(rq);
1348 if (rcu_access_pointer(rq->context->gem_context))
1349 attr = i915_request_gem_context(rq)->sched;
1363 if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
1372 if (list_empty(&rq->sched.signalers_list))
1380 __i915_request_queue(rq, &attr);
1445 static bool __i915_spin_request(const struct i915_request * const rq,
1461 if (!i915_request_is_running(rq))
1477 if (i915_request_completed(rq))
1514 * @rq: the request to wait upon
1527 long i915_request_wait(struct i915_request *rq,
1538 if (dma_fence_is_signaled(&rq->fence))
1544 trace_i915_request_wait_begin(rq, flags);
1552 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1578 __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
1579 dma_fence_signal(&rq->fence);
1596 if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
1597 intel_rps_boost(rq);
1598 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1606 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1610 spin_lock(rq->fence.lock);
1611 #define C (i915_request_completed(rq) ? 1 : \
1612 (spin_unlock(rq->fence.lock), \
1613 intel_engine_flush_submission(rq->engine), \
1614 spin_lock(rq->fence.lock), \
1615 i915_request_completed(rq)))
1618 rq->fence.lock, timeout,
1622 rq->fence.lock, timeout,
1627 KASSERT(i915_request_completed(rq));
1628 dma_fence_signal_locked(&rq->fence);
1632 spin_unlock(rq->fence.lock);
1637 if (i915_request_completed(rq)) {
1638 dma_fence_signal(&rq->fence);
1652 intel_engine_flush_submission(rq->engine);
1658 dma_fence_remove_callback(&rq->fence, &wait.cb);
1664 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
1665 trace_i915_request_wait_end(rq);
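
The matches above cover the full request ("rq") lifecycle implemented in this file: allocation and reinitialisation (__i915_request_create, i915_request_create), ordering against other fences (i915_request_await_start, i915_request_await_dma_fence, __await_execution), commit and queueing (__i915_request_commit, __i915_request_queue, i915_request_add), waiting (i915_request_wait) and reclamation (i915_request_retire, i915_request_retire_upto). As an orientation aid only, the sketch below strings those entry points together from a caller's point of view; example_submit_and_wait() and its in_fence parameter are invented for illustration and do not appear in this file, and the command-emission step is elided.

	/* Usual driver-local headers; paths are relative to the i915 directory. */
	#include <linux/err.h>
	#include "gt/intel_context.h"
	#include "i915_request.h"

	/* Hypothetical helper, not part of this file: submit one request on a
	 * pinned context, optionally ordered after an external fence, then
	 * block until it completes.
	 */
	static int example_submit_and_wait(struct intel_context *ce,
					   struct dma_fence *in_fence)
	{
		struct i915_request *rq;
		long timeout;
		int err = 0;

		rq = i915_request_create(ce);	/* pins ce, takes the timeline mutex */
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		if (in_fence)			/* order this request after an external fence */
			err = i915_request_await_dma_fence(rq, in_fence);

		/* ... on success, emit commands into rq->ring here (elided) ... */

		i915_request_get(rq);		/* keep rq alive across the wait */
		i915_request_add(rq);		/* commit + queue, drops the timeline mutex */

		if (!err) {
			timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
						    MAX_SCHEDULE_TIMEOUT);
			if (timeout < 0)
				err = timeout;
		}

		i915_request_put(rq);		/* the retire path frees the request later */
		return err;
	}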