/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_engine_pm.h |
     50  struct i915_request *rq;    local in function:intel_engine_create_kernel_request
     61  rq = i915_request_create(engine->kernel_context);
     64  return rq;
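The intel_engine_pm.h excerpt above wraps the common pattern of building a request on an engine's always-pinned kernel context. A minimal sketch of that pattern, assuming an intel_engine_cs *engine from the surrounding code (command emission elided; not the verbatim header implementation):

    struct i915_request *rq;

    /* Create a request on the engine's kernel context. */
    rq = i915_request_create(engine->kernel_context);
    if (IS_ERR(rq))
        return rq;              /* propagate the ERR_PTR() to the caller */

    /* ... emit commands into rq here ... */

    i915_request_add(rq);       /* commit the request and queue it for execution */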
intel_engine_heartbeat.c |
     44  static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
     47  i915_request_add_active_barriers(rq);
     50  static void show_heartbeat(const struct i915_request *rq,
     58  rq->sched.attr.priority);
     69  struct i915_request *rq;    local in function:heartbeat
     71  rq = engine->heartbeat.systole;
     72  if (rq && i915_request_completed(rq)) {
     73  i915_request_put(rq);
     85  rq->sched.attr.priority < I915_PRIORITY_BARRIER)
    195  struct i915_request *rq;    local in function:intel_engine_pulse
    230  struct i915_request *rq;    local in function:intel_engine_flush_barriers
    [all...]
intel_engine_pm.c |
    106  struct i915_request *rq = to_request(fence);    local in function:duration
    108  ewma__engine_latency_add(&rq->engine->latency,
    109  ktime_us_delta(rq->fence.timestamp,
    110  rq->duration.emitted));
    114  __queue_and_release_pm(struct i915_request *rq,
    138  __i915_request_queue(rq, NULL);
    149  struct i915_request *rq;    local in function:switch_to_kernel_context
    194  rq = __i915_request_create(ce, GFP_NOWAIT);
    195  if (IS_ERR(rq))
    201  i915_request_add_active_barriers(rq);
    [all...]
selftest_rc6.c |
     80  struct i915_request *rq;    local in function:__live_rc6_ctx
     85  rq = intel_context_create_request(ce);
     86  if (IS_ERR(rq))
     87  return ERR_CAST(rq);
     89  cs = intel_ring_begin(rq, 4);
     91  i915_request_add(rq);
     96  if (INTEL_GEN(rq->i915) >= 8)
    103  intel_ring_advance(rq, cs);
    105  result = rq->hwsp_seqno + 2;
    106  i915_request_add(rq);
    [all...]
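selftest_rc6.c follows the usual i915 emit sequence visible in the excerpt: create a request on a context, reserve ring space, write command dwords, close and submit. A hedged sketch of that sequence, assuming an intel_context *ce from the surrounding test (the RC6-specific dwords are omitted):

    struct i915_request *rq;
    u32 *cs;

    rq = intel_context_create_request(ce);
    if (IS_ERR(rq))
        return ERR_CAST(rq);

    cs = intel_ring_begin(rq, 4);       /* reserve 4 dwords in the ring */
    if (IS_ERR(cs)) {
        i915_request_add(rq);           /* still submit so the request retires */
        return ERR_CAST(cs);
    }

    /* ... write exactly the 4 reserved dwords through cs ... */

    intel_ring_advance(rq, cs);         /* commit the emitted dwords */
    i915_request_add(rq);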
intel_breadcrumbs.c |
     92  static inline bool __request_completed(const struct i915_request *rq)
     94  return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
     98  check_signal_order(struct intel_context *ce, struct i915_request *rq)
    100  if (!list_is_last(&rq->signal_link, &ce->signals) &&
    101  i915_seqno_passed(rq->fence.seqno,
    102  list_next_entry(rq, signal_link)->fence.seqno))
    105  if (!list_is_first(&rq->signal_link, &ce->signals) &&
    106  i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
    107  rq->fence.seqno)
    173  struct i915_request *rq =    local in function:signal_irq_work
    215  struct i915_request *rq =    local in function:signal_irq_work
    378  struct i915_request *rq;    local in function:intel_engine_print_breadcrumbs
    [all...]
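Both the completion check and the signal-order check above rely on i915_seqno_passed(). That comparison has to tolerate the 32-bit seqno wrapping, which the usual serial-number trick handles; a sketch of how such a helper is commonly written (the in-tree definition lives in i915_request.h):

    /* "Has seq1 reached or passed seq2?"  Subtract in unsigned arithmetic
     * and test the sign so a wrapped seqno still compares correctly, which
     * a plain seq1 >= seq2 would not. */
    static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
    {
        return (s32)(seq1 - seq2) >= 0;
    }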
intel_context.c |
    349  struct i915_request *rq)
    355  GEM_BUG_ON(rq->context == ce);
    357  if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
    359  err = i915_active_fence_set(&tl->last_request, rq);
    372  return i915_active_add_request(&ce->active, rq);
    377  struct i915_request *rq;    local in function:intel_context_create_request
    384  rq = i915_request_create(ce);
    387  return rq;
intel_gt_requests.c |
     27  struct i915_request *rq, *rn;    local in function:retire_requests
     29  list_for_each_entry_safe(rq, rn, &tl->requests, link)
     30  if (!i915_request_retire(rq))
selftest_engine_heartbeat.c |
    202  struct i915_request *rq;    local in function:__live_heartbeat_fast
    234  rq = READ_ONCE(engine->heartbeat.systole);
    235  if (rq)
    236  rq = i915_request_get_rcu(rq);
    238  } while (!rq);
    241  while (rq == READ_ONCE(engine->heartbeat.systole))
    245  i915_request_put(rq);
selftest_mocs.c |
     25  static int request_add_sync(struct i915_request *rq, int err)
     27  i915_request_get(rq);
     28  i915_request_add(rq);
     29  if (i915_request_wait(rq, 0, HZ / 5) < 0)
     31  i915_request_put(rq);
     36  static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
     40  i915_request_get(rq);
     41  i915_request_add(rq);
     42  if (spin && !igt_wait_for_spinner(spin, rq))
     44  i915_request_put(rq);
    211  struct i915_request *rq;    local in function:check_mocs_engine
    317  struct i915_request *rq;    local in function:active_engine_reset
    [all...]
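request_add_sync() in the excerpt is a small submit-and-wait helper; reconstructed from the lines shown (the error value assigned on timeout is an assumption, the excerpt elides it):

    static int request_add_sync(struct i915_request *rq, int err)
    {
        i915_request_get(rq);                       /* keep rq alive across the wait */
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
            err = -ETIME;                           /* assumed; not visible in the excerpt */
        i915_request_put(rq);

        return err;
    }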
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/ |
igt_gem_utils.c |
     27  struct i915_request *rq;    local in function:igt_request_alloc
     38  rq = intel_context_create_request(ce);
     41  return rq;
    116  struct i915_request *rq;    local in function:igt_gpu_fill_dw
    128  rq = intel_context_create_request(ce);
    129  if (IS_ERR(rq)) {
    130  err = PTR_ERR(rq);
    138  err = rq->engine->emit_bb_start(rq,
    145  err = i915_request_await_object(rq, batch->obj, false)
    [all...]
i915_gem_coherency.c |
    194  struct i915_request *rq;    local in function:gpu_set
    209  rq = intel_engine_create_kernel_request(ctx->engine);
    210  if (IS_ERR(rq)) {
    212  return PTR_ERR(rq);
    215  cs = intel_ring_begin(rq, 4);
    217  i915_request_add(rq);
    238  intel_ring_advance(rq, cs);
    241  err = i915_request_await_object(rq, vma->obj, true);
    243  err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
    247  i915_request_add(rq);
    [all...]
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/fb/ |
nouveau_nvkm_subdev_fb_gddr5.c |
     44  int rq = ram->freq < 1000000; /* XXX */    local in function:nvkm_gddr5_calc
    101  ram->mr[3] |= (rq & 0x01) << 5;
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_busy.c |
     46  const struct i915_request *rq;    local in function:__busy_set_if_active
     60  rq = const_container_of(fence, struct i915_request, fence);
     61  if (i915_request_completed(rq))
     65  BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
     66  return flag(rq->engine->uabi_class);
i915_gem_object_blt.c |
    110  int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
    115  err = i915_request_await_object(rq, vma->obj, false);
    117  err = i915_vma_move_to_active(vma, rq, 0);
    122  return intel_engine_pool_mark_active(vma->private, rq);
    136  struct i915_request *rq;    local in function:i915_gem_object_fill_blt
    161  rq = intel_context_create_request(ce);
    162  if (IS_ERR(rq)) {
    163  err = PTR_ERR(rq);
    167  err = intel_emit_vma_mark_active(batch, rq);
    171  err = i915_request_await_object(rq, obj, true)
    327  struct i915_request *rq;    local in function:i915_gem_object_copy_blt
    [all...]
i915_gem_wait.c |
    104  struct i915_request *rq;    local in function:__fence_set_priority
    113  rq = to_request(fence);
    114  engine = rq->engine;
    123  engine->schedule(rq, attr);
i915_gem_client_blt.c |
    165  struct i915_request *rq;    local in function:clear_pages_worker
    190  rq = intel_context_create_request(w->ce);
    191  if (IS_ERR(rq)) {
    192  err = PTR_ERR(rq);
    197  if (dma_fence_add_callback(&rq->fence, &w->cb,
    201  err = intel_emit_vma_mark_active(batch, rq);
    206  err = w->ce->engine->emit_init_breadcrumb(rq);
    216  err = __i915_vma_move_to_active(vma, rq);
    220  err = w->ce->engine->emit_bb_start(rq,
    225  i915_request_skip(rq, err)
    [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
igt_spinner.c |
     72  const struct i915_request *rq)
     74  return hws->node.start + seqno_offset(rq->fence.context);
     78  struct i915_request *rq,
     84  err = i915_request_await_object(rq, vma->obj,
     87  err = i915_vma_move_to_active(vma, rq, flags);
     99  struct i915_request *rq = NULL;    local in function:igt_spinner_create_request
    126  rq = intel_context_create_request(ce);
    127  if (IS_ERR(rq)) {
    128  err = PTR_ERR(rq);
    132  err = move_to_active(vma, rq, 0)
    [all...]
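igt_spinner.c, i915_gem_object_blt.c and i915_gem_coherency.c all repeat the same two-step pairing for letting a request touch a buffer: order the request behind the object's existing fences, then record the request on the vma. A rough sketch of that pairing, modelled on the move_to_active() helper excerpted above (vma locking and write-flag handling are elided; read-only access assumed):

    static int move_to_active(struct i915_vma *vma, struct i915_request *rq,
                              unsigned int flags)
    {
        int err;

        /* Serialise rq behind whatever already owns the object (read access). */
        err = i915_request_await_object(rq, vma->obj, false);
        if (err)
            return err;

        /* Track rq on the vma so retirement/eviction see it as busy. */
        return i915_vma_move_to_active(vma, rq, flags);
    }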
i915_gem.c |
     31  struct i915_request *rq;    local in function:switch_to_context
     33  rq = intel_context_create_request(ce);
     34  if (IS_ERR(rq)) {
     35  err = PTR_ERR(rq);
     39  i915_request_add(rq);
i915_perf.c |
     77  static int write_timestamp(struct i915_request *rq, int slot)
     82  cs = intel_ring_begin(rq, 6);
     87  if (INTEL_GEN(rq->i915) >= 8)
     99  intel_ring_advance(rq, cs);
    104  static ktime_t poll_status(struct i915_request *rq, int slot)
    106  while (!intel_read_status_page(rq->engine, slot) &&
    107  !i915_request_completed(rq))
    117  struct i915_request *rq;    local in function:live_noa_delay
    140  rq = intel_engine_create_kernel_request(stream->engine);
    141  if (IS_ERR(rq)) {
    [all...]
i915_active.c |
    105  struct i915_request *rq;    local in function:__live_active_setup
    107  rq = intel_engine_create_kernel_request(engine);
    108  if (IS_ERR(rq)) {
    109  err = PTR_ERR(rq);
    113  err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
    117  err = i915_active_add_request(&active->base, rq);
    118  i915_request_add(rq);
i915_gem_evict.c |
    431  /* Reserve a block so that we know we have enough to fit a few rq */
    485  struct i915_request *rq;    local in function:igt_evict_contexts
    492  /* We will need some GGTT space for the rq's context */
    494  rq = igt_request_alloc(ctx, engine);
    497  if (IS_ERR(rq)) {
    499  if (PTR_ERR(rq) != -EBUSY) {
    502  (int)PTR_ERR(rq));
    503  err = PTR_ERR(rq);
    509  err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
    515  i915_request_add(rq);
    [all...]
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
amdgpu_job.c |
    166  ring = to_amdgpu_ring(entity->rq->sched);
    190  struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
    265  struct drm_sched_rq *rq = &sched->sched_rq[i];    local in function:amdgpu_job_stop_all_jobs_on_sched
    267  if (!rq)
    270  spin_lock(&rq->lock);
    271  list_for_each_entry(s_entity, &rq->entities, list) {
    280  spin_unlock(&rq->lock);
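amdgpu_job_stop_all_jobs_on_sched() walks each run queue of a scheduler and visits the entities parked on it under the rq lock. The traversal shape, reconstructed from the excerpt (num_rqs is a hypothetical stand-in for the scheduler's priority-level count, and the per-entity cleanup is omitted):

    int i;

    for (i = 0; i < num_rqs; i++) {                 /* one rq per priority level */
        struct drm_sched_rq *rq = &sched->sched_rq[i];
        struct drm_sched_entity *s_entity;

        if (!rq)
            continue;

        spin_lock(&rq->lock);
        list_for_each_entry(s_entity, &rq->entities, list) {
            /* ... signal and drop the entity's pending jobs ... */
        }
        spin_unlock(&rq->lock);
    }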
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/i2c/ |
nouveau_nvkm_subdev_i2c_base.c |
    138  u32 hi, lo, rq, tx;    local in function:nvkm_i2c_intr
    143  i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
    144  if (!hi && !lo && !rq && !tx)
    151  if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
/src/sys/external/bsd/drm2/dist/drm/scheduler/ |
sched_entity.c |
     69  entity->rq = NULL;
     77  entity->rq = &sched_list[0]->sched_rq[entity->priority];
    100  assert_spin_locked(&entity->rq->sched->job_list_lock);
    128  * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
    132  * Return the pointer to the rq with least load.
    137  struct drm_sched_rq *rq = NULL;    local in function:drm_sched_entity_get_free_sched
    152  rq = &entity->sched_list[i]->sched_rq[entity->priority];
    156  return rq;
    181  if (!entity->rq)
    184  sched = entity->rq->sched
    494  struct drm_sched_rq *rq;    local in function:drm_sched_entity_select_rq
    [all...]
/src/sys/dev/sbus/ |
isp_sbus.c |
    484  * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
    491  ispreq_t *rq = arg;    local in function:isp_sbus_dmasetup
    497  hidx = isp_handle_index(isp, rq->req_handle);
    536  if (isp_send_cmd(isp, rq, dm_segs, nsegs, xs->datalen, ddir) != CMD_QUEUED) {