/src/sys/external/bsd/drm2/dist/drm/i915/

i915_request.h:
     54  #define RQ_TRACE(rq, fmt, ...) do { \
     55          const struct i915_request *rq__ = (rq); \
    234   * path would be rq->hw_context->ring->timeline->hwsp_seqno.
    313  void __i915_request_queue(struct i915_request *rq,
    316  bool i915_request_retire(struct i915_request *rq);
    317  void i915_request_retire_upto(struct i915_request *rq);
    329  i915_request_get(struct i915_request *rq)
    331          return to_request(dma_fence_get(&rq->fence));
    335  i915_request_get_rcu(struct i915_request *rq)
    337          return to_request(dma_fence_get_rcu(&rq->fence));
    [all...]
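
The i915_request_get() hits above show the driver's refcounting idiom: the request embeds its dma_fence, the reference count lives on the fence, and to_request() recovers the outer struct via container_of(). A minimal user-space sketch of that idiom, with simplified my_fence/my_request types standing in for the kernel's (a sketch, not the driver's implementation):

	#include <stdatomic.h>
	#include <stddef.h>

	struct my_fence {
		atomic_int refcount;	/* stands in for dma_fence's kref */
	};

	struct my_request {
		struct my_fence fence;	/* embedded, as in struct i915_request */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct my_fence *fence_get(struct my_fence *f)
	{
		atomic_fetch_add(&f->refcount, 1);
		return f;
	}

	static struct my_request *to_request(struct my_fence *f)
	{
		return f ? container_of(f, struct my_request, fence) : NULL;
	}

	/* Mirrors i915_request_get(): a request reference is a fence reference. */
	static struct my_request *request_get(struct my_request *rq)
	{
		return to_request(fence_get(&rq->fence));
	}

	int main(void)
	{
		struct my_request rq = { .fence = { .refcount = 1 } };
		struct my_request *ref = request_get(&rq);

		/* ref == &rq: the fence pointer maps back to its request */
		return ref == &rq ? 0 : 1;
	}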

i915_request.c:
     52  void (*hook)(struct i915_request *rq, struct dma_fence *signal);
    112  struct i915_request *rq = to_request(fence);    (local in function: i915_fence_release)
    122  i915_sw_fence_fini(&rq->submit);
    123  i915_sw_fence_fini(&rq->semaphore);
    126  kmem_cache_free(global.slab_requests, rq);
    157  static void __notify_execute_cb(struct i915_request *rq)
    161  lockdep_assert_held(&rq->lock);
    163  if (list_empty(&rq->execute_cb))
    166  list_for_each_entry(cb, &rq->execute_cb, link)
    179  INIT_LIST_HEAD(&rq->execute_cb);
    558  struct i915_request *rq, *rn;    (local in function: retire_requests)
    568  struct i915_request *rq;    (local in function: request_alloc_slow)
    598  struct i915_request *rq = arg;    (local in function: __i915_request_ctor)
    615  struct i915_request *rq = arg;    (local in function: __i915_request_dtor)
    629  struct i915_request *rq;    (local in function: __i915_request_create)
    766  struct i915_request *rq;    (local in function: i915_request_create)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/i915/selftests/

i915_perf.c:
     77  static int write_timestamp(struct i915_request *rq, int slot)
     82  cs = intel_ring_begin(rq, 6);
     87  if (INTEL_GEN(rq->i915) >= 8)
     99  intel_ring_advance(rq, cs);
    104  static ktime_t poll_status(struct i915_request *rq, int slot)
    106  while (!intel_read_status_page(rq->engine, slot) &&
    107         !i915_request_completed(rq))
    117  struct i915_request *rq;    (local in function: live_noa_delay)
    140  rq = intel_engine_create_kernel_request(stream->engine);
    141  if (IS_ERR(rq)) {
    [all...]

igt_spinner.c:
     72  const struct i915_request *rq)
     74  return hws->node.start + seqno_offset(rq->fence.context);
     78  struct i915_request *rq,
     84  err = i915_request_await_object(rq, vma->obj,
     87  err = i915_vma_move_to_active(vma, rq, flags);
     99  struct i915_request *rq = NULL;    (local in function: igt_spinner_create_request)
    126  rq = intel_context_create_request(ce);
    127  if (IS_ERR(rq)) {
    128  err = PTR_ERR(rq);
    132  err = move_to_active(vma, rq, 0);
    [all...]

/src/sys/external/bsd/drm2/dist/drm/i915/gt/

intel_breadcrumbs.c:
     92  static inline bool __request_completed(const struct i915_request *rq)
     94  return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
     98  check_signal_order(struct intel_context *ce, struct i915_request *rq)
    100  if (!list_is_last(&rq->signal_link, &ce->signals) &&
    101      i915_seqno_passed(rq->fence.seqno,
    102                        list_next_entry(rq, signal_link)->fence.seqno))
    105  if (!list_is_first(&rq->signal_link, &ce->signals) &&
    106      i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
    107                        rq->fence.seqno))
    173  struct i915_request *rq =    (local in function: signal_irq_work)
    215  struct i915_request *rq =    (local in function: signal_irq_work)
    378  struct i915_request *rq;    (local in function: intel_engine_print_breadcrumbs)
    [all...]
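
Both __request_completed() and check_signal_order() lean on i915_seqno_passed(), which compares 32-bit sequence numbers by signed subtraction so ordering survives counter wraparound. A self-contained sketch of that comparison (the helper name matches upstream; the test values are mine):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* True if seq1 is at or after seq2, modulo u32 wraparound. */
	static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(i915_seqno_passed(2, 1));
		assert(i915_seqno_passed(7, 7));
		/* 1 is "after" 0xffffffff once the counter wraps. */
		assert(i915_seqno_passed(1, 0xffffffffu));
		assert(!i915_seqno_passed(0xffffffffu, 1));
		return 0;
	}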

intel_engine_heartbeat.c:
     44  static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
     47  i915_request_add_active_barriers(rq);
     50  static void show_heartbeat(const struct i915_request *rq,
     58  rq->sched.attr.priority);
     69  struct i915_request *rq;    (local in function: heartbeat)
     71  rq = engine->heartbeat.systole;
     72  if (rq && i915_request_completed(rq)) {
     73  i915_request_put(rq);
     85  rq->sched.attr.priority < I915_PRIORITY_BARRIER)
    195  struct i915_request *rq;    (local in function: intel_engine_pulse)
    230  struct i915_request *rq;    (local in function: intel_engine_flush_barriers)
    [all...]
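
The heartbeat() fragment shows the watchdog idea: each tick either retires the previous pulse (engine->heartbeat.systole) or, if it has not completed, escalates its priority until it reaches I915_PRIORITY_BARRIER, at which point the engine is presumed hung. A simplified model of one tick, with invented pulse/priority types standing in for the scheduler structs (the real code also submits the next pulse and triggers a reset):

	#include <stdbool.h>
	#include <stdio.h>

	enum { PRIO_HEARTBEAT = 0, PRIO_BARRIER = 3 };

	struct pulse {
		int priority;
		bool completed;
	};

	/* One heartbeat tick over the engine's in-flight pulse, if any. */
	static void heartbeat_tick(struct pulse **systole)
	{
		struct pulse *rq = *systole;

		if (!rq)
			return;	/* idle: a fresh pulse would be submitted here */

		if (rq->completed) {
			*systole = NULL;	/* i915_request_put(): retire the old pulse */
		} else if (rq->priority < PRIO_BARRIER) {
			rq->priority++;	/* nudge the stuck pulse up the queue */
		} else {
			printf("pulse stuck at maximum priority: engine hung\n");
		}
	}

	int main(void)
	{
		struct pulse stuck = { .priority = PRIO_HEARTBEAT, .completed = false };
		struct pulse *systole = &stuck;

		for (int tick = 0; tick < 5; tick++)
			heartbeat_tick(&systole);
		return 0;
	}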

intel_engine_pool.h:
     21  struct i915_request *rq)
     23  return i915_active_add_request(&node->active, rq);

selftest_hangcheck.c:
    116  const struct i915_request *rq)
    118  return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
    122  struct i915_request *rq,
    128  err = i915_request_await_object(rq, vma->obj,
    131  err = i915_vma_move_to_active(vma, rq, flags);
    143  struct i915_request *rq = NULL;    (local in function: hang_create_request)
    191  rq = igt_request_alloc(h->ctx, engine);
    192  if (IS_ERR(rq)) {
    193  err = PTR_ERR(rq);
    197  err = move_to_active(vma, rq, 0);
    339  struct i915_request *rq;    (local in function: igt_hang_sanitycheck)
    426  struct i915_request *rq;    (local in function: igt_reset_nop)
    506  struct i915_request *rq;    (local in function: igt_reset_nop_engine)
    592  struct i915_request *rq;    (local in function: __igt_reset_engine)
    709  struct i915_request *rq[8] = {};    (local in function: active_engine)
    843  struct i915_request *rq = NULL;    (local in function: __igt_reset_engines)
   1032  struct i915_request *rq;    (local in function: igt_reset_wait)
   1161  struct i915_request *rq;    (local in function: __igt_reset_evict_vma)
   1392  struct i915_request *rq;    (local in function: igt_reset_queue)
   1505  struct i915_request *rq;    (local in function: igt_handle_error)
   1591  struct i915_request *rq;    (local in function: igt_atomic_reset_engine)
    [all...]

intel_ring.h:
     21  u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
     22  int intel_ring_cacheline_align(struct i915_request *rq);
     43  static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
     53  GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
     82  static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
     85  u32 offset = addr - rq->ring->vaddr;
     86  GEM_BUG_ON(offset > rq->ring->size);
     87  return intel_ring_wrap(rq->ring, offset);
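
intel_ring_begin()/intel_ring_advance() form a reserve-then-commit pair: begin() returns a cursor into the ring for num_dwords of space, the caller writes commands through it, and advance() asserts the cursor landed exactly at the reserved end (the GEM_BUG_ON at line 53). A self-contained sketch of that contract over a flat array, ignoring the real ring's wrapping and wait-for-space logic:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define RING_DWORDS 1024

	struct ring {
		uint32_t buf[RING_DWORDS];
		unsigned int emit;	/* next free dword */
	};

	static uint32_t *ring_begin(struct ring *r, unsigned int num_dwords)
	{
		uint32_t *cs = &r->buf[r->emit];

		assert(r->emit + num_dwords <= RING_DWORDS);
		r->emit += num_dwords;
		return cs;
	}

	static void ring_advance(struct ring *r, const uint32_t *cs)
	{
		/* Caller must consume exactly what it reserved. */
		assert(cs == &r->buf[r->emit]);
	}

	int main(void)
	{
		struct ring r;
		uint32_t *cs;

		memset(&r, 0, sizeof(r));
		cs = ring_begin(&r, 2);
		*cs++ = 0;	/* e.g. MI_NOOP */
		*cs++ = 0;
		ring_advance(&r, cs);
		return 0;
	}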

selftest_lrc.c:
     92  struct i915_request *rq;    (local in function: live_sanitycheck)
    100  rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
    101  if (IS_ERR(rq)) {
    102  err = PTR_ERR(rq);
    106  i915_request_add(rq);
    107  if (!igt_wait_for_spinner(&spin, rq)) {
    149  struct i915_request *rq[2];    (local in function: live_unlite_restore)
    196  rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
    197  if (IS_ERR(rq[0])) {
    198  err = PTR_ERR(rq[0]);
    316  struct i915_request *rq;    (local in function: live_hold_reset)
    435  struct i915_request *rq;    (local in function: semaphore_queue)
    470  struct i915_request *rq;    (local in function: release_queue)
    518  struct i915_request *rq;    (local in function: slice_semaphore_queue)
    624  struct i915_request *rq;    (local in function: nop_request)
    700  struct i915_request *rq, *nop;    (local in function: live_timeslice_queue)
    980  struct i915_request *rq;    (local in function: spinner_create_request)
   1026  struct i915_request *rq;    (local in function: live_preempt)
   1122  struct i915_request *rq;    (local in function: live_late_preempt)
   1331  struct i915_request *rq;    (local in function: __cancel_active0)
   1380  struct i915_request *rq[2] = {};    (local in function: __cancel_active1)
   1452  struct i915_request *rq[3] = {};    (local in function: __cancel_queued)
   1542  struct i915_request *rq;    (local in function: __cancel_hostile)
   1770  struct i915_request *rq;    (local in function: dummy_request)
   1819  struct i915_request *rq[ARRAY_SIZE(client)] = {};    (local in function: live_suppress_wait_preempt)
   1967  struct i915_request *rq;    (local in function: live_chain_preempt)
   2097  struct i915_request *rq;    (local in function: create_gang)
   2206  struct i915_request *rq = NULL;    (local in function: live_preempt_gang)
   2321  struct i915_request *rq;    (local in function: live_preempt_hang)
   2437  struct i915_request *rq;    (local in function: live_preempt_timeout)
   2527  struct i915_request *rq;    (local in function: smoke_submit)
   2798  struct i915_request *rq;    (local in function: nop_virtual_engine)
   2815  struct i915_request *rq;    (local in function: nop_virtual_engine)
   3079  struct i915_request *rq;    (local in function: preserved_virtual_engine)
   3197  struct i915_request *rq[16];    (local in function: bond_virtual_engine)
   3426  struct i915_request *rq;    (local in function: reset_virtual_engine)
   3844  struct i915_request *rq;    (local in function: __live_lrc_state)
   3957  struct i915_request *rq;    (local in function: gpr_make_dirty)
   3988  struct i915_request *rq;    (local in function: __live_gpr_clear)
    [all...]

selftest_mocs.c:
     25  static int request_add_sync(struct i915_request *rq, int err)
     27  i915_request_get(rq);
     28  i915_request_add(rq);
     29  if (i915_request_wait(rq, 0, HZ / 5) < 0)
     31  i915_request_put(rq);
     36  static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
     40  i915_request_get(rq);
     41  i915_request_add(rq);
     42  if (spin && !igt_wait_for_spinner(spin, rq))
     44  i915_request_put(rq);
    211  struct i915_request *rq;    (local in function: check_mocs_engine)
    317  struct i915_request *rq;    (local in function: active_engine_reset)
    [all...]
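
request_add_sync() encodes the lifetime rule these selftests depend on: i915_request_add() hands the request to the scheduler, which may retire and free it at any time, so a caller that intends to wait must pin the request with i915_request_get() first and drop that reference after the wait. A toy refcount model of the rule (the types and the immediate-retire shortcut are mine, for illustration):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct request {
		atomic_int ref;
	};

	static struct request *request_get(struct request *rq)
	{
		atomic_fetch_add(&rq->ref, 1);
		return rq;
	}

	static void request_put(struct request *rq)
	{
		if (atomic_fetch_sub(&rq->ref, 1) == 1)
			free(rq);	/* last reference: request is retired */
	}

	/* Like i915_request_add(): submission consumes the caller's reference. */
	static void request_add(struct request *rq)
	{
		request_put(rq);	/* scheduler modelled as retiring instantly */
	}

	static int request_add_sync(struct request *rq)
	{
		request_get(rq);	/* keep rq alive across the wait */
		request_add(rq);
		/* ... i915_request_wait(rq, ...) would go here; rq is still valid ... */
		request_put(rq);
		return 0;
	}

	int main(void)
	{
		struct request *rq = calloc(1, sizeof(*rq));

		if (!rq)
			return 1;
		atomic_store(&rq->ref, 1);	/* creation reference */
		return request_add_sync(rq);
	}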

intel_ring_submission.c:
     58  gen2_render_ring_flush(struct i915_request *rq, u32 mode)
     70  cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
     77  *cs++ = intel_gt_scratch_offset(rq->engine->gt,
     83  intel_ring_advance(rq, cs);
     89  gen4_render_ring_flush(struct i915_request *rq, u32 mode)
    125  if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
    133  cs = intel_ring_begin(rq, i);
    151  *cs++ = intel_gt_scratch_offset(rq->engine->gt,
    161  *cs++ = intel_gt_scratch_offset(rq->engine->gt,
    793  struct i915_request *pos, *rq;    (local in function: reset_rewind)
    [all...]

intel_lrc.c:
    291  static void mark_eio(struct i915_request *rq)
    293  if (i915_request_completed(rq))
    296  GEM_BUG_ON(i915_request_signaled(rq));
    298  dma_fence_set_error(&rq->fence, -EIO);
    299  i915_request_mark_complete(rq);
    303  active_request(const struct intel_timeline * const tl, struct i915_request *rq)
    305  struct i915_request *active = rq;
    308  list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
    309  if (i915_request_completed(rq))
    312  active = rq;
   1007  struct i915_request *rq, *rn, *active = NULL;    (local in function: __unwind_incomplete_requests)
   1460  struct i915_request * const *port, *rq;    (local in function: assert_pending_valid)
   1556  struct i915_request *rq = execlists->pending[n];    (local in function: execlists_submit_ports)
   1727  struct i915_request *rq;    (local in function: defer_active)
   1771  const struct i915_request *rq = *engine->execlists.active;    (local in function: active_timeslice)
   1797  struct i915_request *rq;    (local in function: active_preempt_timeout)
   1858  struct i915_request *rq = READ_ONCE(ve->request);    (local in function: execlists_dequeue)
   1967  struct i915_request *rq;    (local in function: execlists_dequeue)
   2073  struct i915_request *rq, *rn;    (local in function: execlists_dequeue)
   2601  struct i915_request *rq;    (member in struct: execlists_capture)
   3566  struct i915_request *rq;    (local in function: __execlists_reset)
   3680  struct i915_request *rq, *rn;    (local in function: execlists_reset_cancel)
   4827  struct i915_request *rq;    (local in function: virtual_submission_mask)
   5249  struct i915_request *rq, *last;    (local in function: intel_execlists_show_requests)
   5307  struct i915_request *rq = READ_ONCE(ve->request);    (local in function: intel_execlists_show_requests)
    [all...]
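
mark_eio() above is the cancellation path: a request that will never execute has -EIO set on its fence before being marked complete, so waiters wake up and observe an error instead of a bogus success. A small model of that fail-then-complete ordering (fence fields simplified; upstream uses dma_fence_set_error() and i915_request_mark_complete()):

	#include <errno.h>
	#include <stdbool.h>

	struct fence {
		int error;	/* 0, or a negative errno set before completion */
		bool completed;
	};

	static void mark_eio(struct fence *f)
	{
		if (f->completed)
			return;		/* already ran: nothing to poison */
		f->error = -EIO;	/* must precede the completion flag */
		f->completed = true;	/* waiters now see completed + error */
	}

	int main(void)
	{
		struct fence f = { 0, false };

		mark_eio(&f);
		return (f.completed && f.error == -EIO) ? 0 : 1;
	}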

intel_engine_pm.h:
     50  struct i915_request *rq;    (local in function: intel_engine_create_kernel_request)
     61  rq = i915_request_create(engine->kernel_context);
     64  return rq;

selftest_timeline.c:
    424  static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
    428  cs = intel_ring_begin(rq, 4);
    432  if (INTEL_GEN(rq->i915) >= 8) {
    437  } else if (INTEL_GEN(rq->i915) >= 4) {
    449  intel_ring_advance(rq, cs);
    457  struct i915_request *rq;    (local in function: tl_write)
    462  rq = ERR_PTR(err);
    466  rq = intel_engine_create_kernel_request(engine);
    467  if (IS_ERR(rq))
    470  i915_request_get(rq);
    536  struct i915_request *rq;    (local in function: live_hwsp_engine)
    605  struct i915_request *rq;    (local in function: live_hwsp_alternate)
    677  struct i915_request *rq;    (local in function: live_hwsp_wrap)
    783  struct i915_request *rq;    (local in function: live_hwsp_recycle)
    [all...]

intel_engine_pm.c:
    106  struct i915_request *rq = to_request(fence);    (local in function: duration)
    108  ewma__engine_latency_add(&rq->engine->latency,
    109                           ktime_us_delta(rq->fence.timestamp,
    110                                          rq->duration.emitted));
    114  __queue_and_release_pm(struct i915_request *rq,
    138  __i915_request_queue(rq, NULL);
    149  struct i915_request *rq;    (local in function: switch_to_kernel_context)
    194  rq = __i915_request_create(ce, GFP_NOWAIT);
    195  if (IS_ERR(rq))
    201  i915_request_add_active_barriers(rq);
    [all...]
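
The duration() callback feeds each request's emit-to-completion time into an exponentially weighted moving average of engine latency (the ewma__engine_latency type comes from the kernel's DECLARE_EWMA machinery). A minimal fixed-point EWMA in the same spirit, with an assumed weight of 8 and 4 fractional bits rather than the driver's actual parameters:

	#include <stdint.h>
	#include <stdio.h>

	#define EWMA_PRECISION 4	/* fractional bits kept internally */
	#define EWMA_WEIGHT    8	/* smoothing factor, power of two */

	struct ewma {
		uint64_t internal;	/* average << EWMA_PRECISION */
	};

	static void ewma_add(struct ewma *e, uint64_t val)
	{
		uint64_t scaled = val << EWMA_PRECISION;

		if (e->internal)	/* new = old + (val - old) / weight */
			e->internal += ((int64_t)(scaled - e->internal)) / EWMA_WEIGHT;
		else
			e->internal = scaled;	/* first sample seeds the average */
	}

	static uint64_t ewma_read(const struct ewma *e)
	{
		return e->internal >> EWMA_PRECISION;
	}

	int main(void)
	{
		struct ewma latency = { 0 };
		const uint64_t us[] = { 100, 120, 90, 400, 110 };

		for (unsigned int i = 0; i < sizeof(us) / sizeof(us[0]); i++) {
			ewma_add(&latency, us[i]);
			printf("engine latency ~%llu us\n",
			       (unsigned long long)ewma_read(&latency));
		}
		return 0;
	}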

selftest_rc6.c:
     80  struct i915_request *rq;    (local in function: __live_rc6_ctx)
     85  rq = intel_context_create_request(ce);
     86  if (IS_ERR(rq))
     87  return ERR_CAST(rq);
     89  cs = intel_ring_begin(rq, 4);
     91  i915_request_add(rq);
     96  if (INTEL_GEN(rq->i915) >= 8)
    103  intel_ring_advance(rq, cs);
    105  result = rq->hwsp_seqno + 2;
    106  i915_request_add(rq);
    [all...]

selftest_workarounds.c:
     41  static int request_add_sync(struct i915_request *rq, int err)
     43  i915_request_get(rq);
     44  i915_request_add(rq);
     45  if (i915_request_wait(rq, 0, HZ / 5) < 0)
     47  i915_request_put(rq);
     52  static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
     56  i915_request_get(rq);
     57  i915_request_add(rq);
     58  if (spin && !igt_wait_for_spinner(spin, rq))
     60  i915_request_put(rq);
    107  struct i915_request *rq;    (local in function: read_nonprivs)
    273  struct i915_request *rq;    (local in function: switch_to_scratch_context)
    496  struct i915_request *rq;    (local in function: check_dirty_whitelist)
    770  struct i915_request *rq;    (local in function: read_whitelisted_registers)
    818  struct i915_request *rq;    (local in function: scrub_whitelisted_registers)
   1184  struct i915_request *rq;    (local in function: live_engine_reset_workarounds)
    [all...]

intel_engine_cs.c:
    622  struct i915_request rq;    (member in struct: measure_breadcrumb)
    651  frame->rq.i915 = engine->i915;
    652  frame->rq.engine = engine;
    653  frame->rq.ring = &frame->ring;
    654  rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
    661  dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
   1189  struct i915_request *rq,
   1192  const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
   1196  x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
   1339  struct i915_request * const *port, *rq;    (local in function: intel_engine_print_registers)
   1486  struct i915_request *rq;    (local in function: intel_engine_dump)
   1603  struct i915_request *rq;    (local in function: intel_enable_engine_stats)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/i915/gt/uc/

intel_guc_submission.c:
    222  static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
    224  struct intel_engine_cs *engine = rq->engine;
    225  u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
    226  u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
    229  ring_tail, rq->fence.seqno);
    255  struct i915_request *rq = *out++;    (local in function: guc_submit)
    257  flush_ggtt_writes(rq->ring->vma);
    258  guc_add_request(guc, rq);
    264  static inline int rq_prio(const struct i915_request *rq)
    319  struct i915_request *rq, *rn;    (local in function: __guc_dequeue)
    356  struct i915_request **port, *rq;    (local in function: guc_submission_tasklet)
    399  struct i915_request * const *port, *rq;    (local in function: cancel_port_requests)
    412  struct i915_request *rq;    (local in function: guc_reset_rewind)
    437  struct i915_request *rq, *rn;    (local in function: guc_reset_cancel)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/

igt_gem_utils.c:
     27  struct i915_request *rq;    (local in function: igt_request_alloc)
     38  rq = intel_context_create_request(ce);
     41  return rq;
    116  struct i915_request *rq;    (local in function: igt_gpu_fill_dw)
    128  rq = intel_context_create_request(ce);
    129  if (IS_ERR(rq)) {
    130  err = PTR_ERR(rq);
    138  err = rq->engine->emit_bb_start(rq,
    145  err = i915_request_await_object(rq, batch->obj, false);
    [all...]

/src/sys/external/bsd/drm2/dist/drm/scheduler/

sched_entity.c:
     69  entity->rq = NULL;
     77  entity->rq = &sched_list[0]->sched_rq[entity->priority];
    100  assert_spin_locked(&entity->rq->sched->job_list_lock);
    128  * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
    132  * Return the pointer to the rq with least load.
    137  struct drm_sched_rq *rq = NULL;    (local in function: drm_sched_entity_get_free_sched)
    152  rq = &entity->sched_list[i]->sched_rq[entity->priority];
    156  return rq;
    181  if (!entity->rq)
    184  sched = entity->rq->sched;
    494  struct drm_sched_rq *rq;    (local in function: drm_sched_entity_select_rq)
    [all...]
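
The drm_sched_entity_get_free_sched() snippet is load balancing: among all schedulers an entity may run on, pick the run queue at the entity's priority level on the least-loaded scheduler. A condensed sketch of that selection loop, using a plain num_jobs counter as the load metric (upstream compares an atomic score per scheduler):

	#include <limits.h>
	#include <stddef.h>

	enum { NUM_PRIORITIES = 4 };

	struct sched_rq {
		int num_jobs;	/* stand-in load metric */
	};

	struct scheduler {
		struct sched_rq sched_rq[NUM_PRIORITIES];
	};

	/* Return the least-loaded run queue at this priority, or NULL. */
	static struct sched_rq *
	get_free_rq(struct scheduler **sched_list, unsigned int count, int prio)
	{
		struct sched_rq *rq = NULL;
		int min_jobs = INT_MAX;

		for (unsigned int i = 0; i < count; i++) {
			struct sched_rq *cand = &sched_list[i]->sched_rq[prio];

			if (cand->num_jobs < min_jobs) {
				min_jobs = cand->num_jobs;
				rq = cand;
			}
		}
		return rq;
	}

	int main(void)
	{
		struct scheduler a = { 0 }, b = { 0 };
		struct scheduler *list[] = { &a, &b };

		a.sched_rq[0].num_jobs = 3;	/* a is busier than b */
		return get_free_rq(list, 2, 0) == &b.sched_rq[0] ? 0 : 1;
	}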

sched_main.c:
     73  * @rq: scheduler run queue
     78  struct drm_sched_rq *rq)
     80  spin_lock_init(&rq->lock);
     81  INIT_LIST_HEAD(&rq->entities);
     82  rq->current_entity = NULL;
     83  rq->sched = sched;
     89  * @rq: scheduler run queue
     94  void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
     99  spin_lock(&rq->lock);
    100  atomic_inc(&rq->sched->score);
    361  struct drm_sched_rq *rq = &sched->sched_rq[i];    (local in function: drm_sched_increase_karma)
    901  struct drm_sched_rq *rq = &sched->sched_rq[i];    (local in function: drm_sched_fini)
    [all...]
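
The run queue initialized above pairs its entity list with a current_entity cursor so that selection is round-robin: the next search resumes after the previously chosen entity instead of always rescanning from the head. A simplified selector over an array (upstream walks the linked list under rq->lock and also verifies each candidate has a job ready to run):

	#include <stdbool.h>
	#include <stddef.h>

	#define MAX_ENTITIES 8

	struct entity {
		bool ready;	/* has a runnable job queued */
	};

	struct run_queue {
		struct entity *entities[MAX_ENTITIES];
		unsigned int count;
		unsigned int current;	/* index of the last selected entity */
	};

	/* Round-robin pick: start just after the previous choice. */
	static struct entity *rq_select_entity(struct run_queue *rq)
	{
		if (rq->count == 0)
			return NULL;

		for (unsigned int i = 1; i <= rq->count; i++) {
			unsigned int idx = (rq->current + i) % rq->count;

			if (rq->entities[idx]->ready) {
				rq->current = idx;
				return rq->entities[idx];
			}
		}
		return NULL;	/* nothing runnable right now */
	}

	int main(void)
	{
		struct entity e0 = { true }, e1 = { true };
		struct run_queue rq = { { &e0, &e1 }, 2, 0 };

		/* Starting after index 0, the first pick is e1, not e0. */
		return rq_select_entity(&rq) == &e1 ? 0 : 1;
	}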

/src/sys/external/bsd/drm2/dist/drm/i915/gem/

i915_gem_object_blt.c:
    110  int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
    115  err = i915_request_await_object(rq, vma->obj, false);
    117  err = i915_vma_move_to_active(vma, rq, 0);
    122  return intel_engine_pool_mark_active(vma->private, rq);
    136  struct i915_request *rq;    (local in function: i915_gem_object_fill_blt)
    161  rq = intel_context_create_request(ce);
    162  if (IS_ERR(rq)) {
    163  err = PTR_ERR(rq);
    167  err = intel_emit_vma_mark_active(batch, rq);
    171  err = i915_request_await_object(rq, obj, true);
    327  struct i915_request *rq;    (local in function: i915_gem_object_copy_blt)
    [all...]

i915_gem_busy.c:
     46  const struct i915_request *rq;    (local in function: __busy_set_if_active)
     60  rq = const_container_of(fence, struct i915_request, fence);
     61  if (i915_request_completed(rq))
     65  BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
     66  return flag(rq->engine->uabi_class);
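
__busy_set_if_active() turns an active fence back into its request and reports the engine's uabi_class through flag(). In the busy ioctl's encoding, readers set one bit per engine class in the upper 16 bits of the result, while the most recent writer's class (biased by one) sits in the lower 16 bits; a writer is also reported as a reader of its own class. A sketch of that packing, with the example class values assumed:

	#include <assert.h>
	#include <stdint.h>

	/* One read flag per engine class, packed into the upper half. */
	static uint32_t busy_read_flag(uint16_t uabi_class)
	{
		assert(uabi_class < 16);
		return 0x10000u << uabi_class;
	}

	/* Writer's class in the lower half; it also counts as a reader. */
	static uint32_t busy_write_id(uint16_t uabi_class)
	{
		return (uabi_class + 1) | busy_read_flag(uabi_class);
	}

	int main(void)
	{
		enum { CLASS_RENDER = 0, CLASS_COPY = 1 };	/* assumed values */
		uint32_t busy = 0;

		busy |= busy_read_flag(CLASS_COPY);	/* 0x00020000 */
		busy |= busy_write_id(CLASS_RENDER);	/* 0x00010001 */
		assert(busy == 0x00030001u);
		return 0;
	}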