/src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_engine_pm.h
    50  struct i915_request *rq;  (local)
    61  rq = i915_request_create(engine->kernel_context);
    64  return rq
|
intel_engine_heartbeat.c
    44  static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
    47  i915_request_add_active_barriers(rq);
    50  static void show_heartbeat(const struct i915_request *rq,
    58  rq->sched.attr.priority);
    69  struct i915_request *rq;  (local)
    71  rq = engine->heartbeat.systole;
    72  if (rq && i915_request_completed(rq)) {
    73  i915_request_put(rq);
    85  rq->sched.attr.priority < I915_PRIORITY_BARRIER)
   195  struct i915_request *rq;  (local)
   230  struct i915_request *rq;  (local)
    [all...]
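
The systole hits trace the heartbeat state machine: a completed pulse is retired (lines 72-73), and a stuck one has its priority raised until it reaches I915_PRIORITY_BARRIER (line 85). A minimal user-space analog of that escalate-or-retire tick, using hypothetical stand-ins (struct pulse, PRIO_BARRIER) rather than the driver's types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's request/priority types. */
    enum { PRIO_HEARTBEAT = 1, PRIO_BARRIER = 3 };

    struct pulse {
        bool completed;
        int prio;
    };

    /* One heartbeat tick: retire a finished pulse, otherwise escalate it. */
    static void heartbeat_tick(struct pulse **systole)
    {
        struct pulse *rq = *systole;

        if (rq && rq->completed) {
            *systole = NULL;        /* i915_request_put() in the driver */
            return;
        }
        if (rq && rq->prio < PRIO_BARRIER) {
            rq->prio++;             /* bump the stuck pulse toward the barrier */
            printf("escalated pulse to prio %d\n", rq->prio);
        }
    }

    int main(void)
    {
        struct pulse p = { .completed = false, .prio = PRIO_HEARTBEAT };
        struct pulse *systole = &p;

        heartbeat_tick(&systole);   /* stuck: prio 1 -> 2 */
        heartbeat_tick(&systole);   /* stuck: prio 2 -> 3 (barrier) */
        p.completed = true;
        heartbeat_tick(&systole);   /* completed: retired */
        return systole == NULL ? 0 : 1;
    }
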
intel_engine_pm.c
   106  struct i915_request *rq = to_request(fence);  (local)
   108  ewma__engine_latency_add(&rq->engine->latency,
   109  ktime_us_delta(rq->fence.timestamp,
   110  rq->duration.emitted));
   114  __queue_and_release_pm(struct i915_request *rq,
   138  __i915_request_queue(rq, NULL);
   149  struct i915_request *rq;  (local)
   194  rq = __i915_request_create(ce, GFP_NOWAIT);
   195  if (IS_ERR(rq))
   201  i915_request_add_active_barriers(rq);
    [all...]
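
Lines 108-110 feed the request's emit-to-complete delta into an exponentially weighted moving average of engine latency. A simplified, self-contained sketch of the EWMA update rule (the kernel's DECLARE_EWMA machinery also keeps the average in fixed point for extra precision, omitted here):

    #include <stdio.h>

    /*
     * Simplified analog of the kernel EWMA update:
     *   avg <- avg - avg/2^weight + sample/2^weight
     */
    struct ewma {
        unsigned long avg;
        unsigned int weight;    /* log2 of the smoothing window */
    };

    static void ewma_add(struct ewma *e, unsigned long sample)
    {
        if (e->avg == 0)
            e->avg = sample;    /* first sample seeds the average */
        else
            e->avg = e->avg - (e->avg >> e->weight) + (sample >> e->weight);
    }

    int main(void)
    {
        struct ewma latency = { 0, 3 };
        unsigned long samples[] = { 100, 120, 80, 110, 500 };

        for (unsigned int i = 0; i < sizeof samples / sizeof samples[0]; i++) {
            ewma_add(&latency, samples[i]);
            printf("sample %lu -> avg %lu\n", samples[i], latency.avg);
        }
        return 0;
    }
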
selftest_rc6.c
    80  struct i915_request *rq;  (local)
    85  rq = intel_context_create_request(ce);
    86  if (IS_ERR(rq))
    87  return ERR_CAST(rq);
    89  cs = intel_ring_begin(rq, 4);
    91  i915_request_add(rq);
    96  if (INTEL_GEN(rq->i915) >= 8)
   103  intel_ring_advance(rq, cs);
   105  result = rq->hwsp_seqno + 2;
   106  i915_request_add(rq);
    [all...]
intel_breadcrumbs.c
    92  static inline bool __request_completed(const struct i915_request *rq)
    94  return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
    98  check_signal_order(struct intel_context *ce, struct i915_request *rq)
   100  if (!list_is_last(&rq->signal_link, &ce->signals) &&
   101  i915_seqno_passed(rq->fence.seqno,
   102  list_next_entry(rq, signal_link)->fence.seqno))
   105  if (!list_is_first(&rq->signal_link, &ce->signals) &&
   106  i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
   107  rq->fence.seqno)
   173  struct i915_request *rq =  (local)
   215  struct i915_request *rq =  (local)
   378  struct i915_request *rq;  (local)
    [all...]
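
__request_completed (line 92) leans on i915_seqno_passed, the wraparound-safe sequence comparison done by signed subtraction of unsigned 32-bit seqnos. A self-contained demonstration of that technique:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe "a has passed b", the i915_seqno_passed technique. */
    static bool seqno_passed(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) >= 0;
    }

    int main(void)
    {
        /* Correct across the 32-bit wrap: 2 comes "after" 0xfffffffe. */
        printf("%d\n", seqno_passed(0x00000002u, 0xfffffffeu)); /* 1 */
        printf("%d\n", seqno_passed(0xfffffffeu, 0x00000002u)); /* 0 */
        printf("%d\n", seqno_passed(5u, 5u));                   /* 1: equal passes */
        return 0;
    }
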
intel_context.c
   349  struct i915_request *rq)
   355  GEM_BUG_ON(rq->context == ce);
   357  if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
   359  err = i915_active_fence_set(&tl->last_request, rq);
   372  return i915_active_add_request(&ce->active, rq);
   377  struct i915_request *rq;  (local)
   384  rq = i915_request_create(ce);
   387  return rq;
|
intel_gt_requests.c
    27  struct i915_request *rq, *rn;  (local)
    29  list_for_each_entry_safe(rq, rn, &tl->requests, link)
    30  if (!i915_request_retire(rq))
|
selftest_engine_heartbeat.c
   202  struct i915_request *rq;  (local)
   234  rq = READ_ONCE(engine->heartbeat.systole);
   235  if (rq)
   236  rq = i915_request_get_rcu(rq);
   238  } while (!rq);
   241  while (rq == READ_ONCE(engine->heartbeat.systole))
   245  i915_request_put(rq);
|
/src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_busy.c
    46  const struct i915_request *rq;  (local)
    60  rq = const_container_of(fence, struct i915_request, fence);
    61  if (i915_request_completed(rq))
    65  BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
    66  return flag(rq->engine->uabi_class);
|
i915_gem_object_blt.c
   110  int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
   115  err = i915_request_await_object(rq, vma->obj, false);
   117  err = i915_vma_move_to_active(vma, rq, 0);
   122  return intel_engine_pool_mark_active(vma->private, rq);
   136  struct i915_request *rq;  (local)
   161  rq = intel_context_create_request(ce);
   162  if (IS_ERR(rq)) {
   163  err = PTR_ERR(rq);
   167  err = intel_emit_vma_mark_active(batch, rq);
   171  err = i915_request_await_object(rq, obj, true)
   327  struct i915_request *rq;  (local)
    [all...]
i915_gem_wait.c
   104  struct i915_request *rq;  (local)
   113  rq = to_request(fence);
   114  engine = rq->engine;
   123  engine->schedule(rq, attr);
|
i915_gem_client_blt.c
   165  struct i915_request *rq;  (local)
   190  rq = intel_context_create_request(w->ce);
   191  if (IS_ERR(rq)) {
   192  err = PTR_ERR(rq);
   197  if (dma_fence_add_callback(&rq->fence, &w->cb,
   201  err = intel_emit_vma_mark_active(batch, rq);
   206  err = w->ce->engine->emit_init_breadcrumb(rq);
   216  err = __i915_vma_move_to_active(vma, rq);
   220  err = w->ce->engine->emit_bb_start(rq,
   225  i915_request_skip(rq, err)
    [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/
igt_gem_utils.c
    27  struct i915_request *rq;  (local)
    38  rq = intel_context_create_request(ce);
    41  return rq;
   116  struct i915_request *rq;  (local)
   128  rq = intel_context_create_request(ce);
   129  if (IS_ERR(rq)) {
   130  err = PTR_ERR(rq);
   138  err = rq->engine->emit_bb_start(rq,
   145  err = i915_request_await_object(rq, batch->obj, false)
    [all...]
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/fb/
nouveau_nvkm_subdev_fb_gddr5.c
    44  int rq = ram->freq < 1000000; /* XXX */  (local)
   101  ram->mr[3] |= (rq & 0x01) << 5;
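
Line 44 derives a boolean RQ flag from the memory clock (units left unclear by the source's own XXX comment) and line 101 packs it into bit 5 of mode register 3. The masking idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t freq = 800000;         /* below the 1000000 threshold */
        uint32_t mr3 = 0;
        int rq = freq < 1000000;        /* same comparison as line 44 */

        mr3 |= (rq & 0x01) << 5;        /* pack the flag into MR3 bit 5 */
        printf("mr3 = 0x%02x\n", (unsigned int)mr3);    /* prints 0x20 */
        return 0;
    }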
|
/src/external/lgpl3/gmp/dist/mini-gmp/tests/
t-mpq_muldiv_2exp.c
    54  mpq_t aq, rq, tq;  (local)
    62  mpq_init (rq);
    75  mpq_mul_2exp (rq, aq, e);
    76  t1 = mpz_scan1 (mpq_numref (rq), 0);
    77  t2 = mpz_scan1 (mpq_denref (rq), 0);
    78  mpq_neg (tq, rq);
    88  dump ("nr", mpq_numref (rq));
    89  dump ("dr", mpq_denref (rq));
    93  mpq_div_2exp (rq, aq, e);
    94  t1 = mpz_scan1 (mpq_numref (rq), 0)
    [all...]
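
The test shifts a rational by 2^e and then uses mpz_scan1 on numerator and denominator to verify the power of two landed in exactly one component. A small runnable use of the same entry points, built against full GMP (mini-gmp also provides mpq_mul_2exp/mpq_div_2exp, but not gmp_printf):

    #include <stdio.h>
    #include <gmp.h>        /* link with -lgmp */

    int main(void)
    {
        mpq_t aq, rq;

        mpq_init(aq);
        mpq_init(rq);

        mpq_set_ui(aq, 3, 20);          /* 3/20; the denominator holds a 2^2 */
        mpq_canonicalize(aq);

        mpq_mul_2exp(rq, aq, 5);        /* 3/20 * 2^5 = 24/5 */
        gmp_printf("mul_2exp: %Qd\n", rq);

        mpq_div_2exp(rq, aq, 5);        /* 3/20 / 2^5 = 3/640 */
        gmp_printf("div_2exp: %Qd\n", rq);

        /* As in the test: the twos sit in exactly one component. */
        printf("num twos: %lu, den twos: %lu\n",
               mpz_scan1(mpq_numref(rq), 0),
               mpz_scan1(mpq_denref(rq), 0));

        mpq_clear(aq);
        mpq_clear(rq);
        return 0;
    }
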
t-div.c
    39  mpz_t a, b, q, r, rq, rr;  (local)
    47  mpz_init (rq);
    85  mini_random_op4 (ops[j], MAXBITS, a, b, rq, rr);
    87  if (mpz_cmp (r, rr) || mpz_cmp (q, rq))
    95  dump ("qref", rq);
   100  if (mpz_cmp (q, rq))
   106  dump ("qref", rq);
   171  || mpz_cmp (r, rr) || mpz_cmp (q, rq))
   180  dump ("qref", rq);
   186  if (rl != mpz_get_ui (rr) || mpz_cmp (q, rq))
    [all...]
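
t-div.c compares each mpz division family against reference quotient/remainder pairs (rq, rr). The invariant it exercises, n == q*d + r with the remainder's sign fixed by the rounding mode, can be checked directly against full GMP:

    #include <assert.h>
    #include <gmp.h>        /* link with -lgmp */

    int main(void)
    {
        mpz_t n, d, q, r, chk;

        mpz_inits(n, d, q, r, chk, NULL);
        mpz_set_si(n, -100);
        mpz_set_ui(d, 7);

        mpz_tdiv_qr(q, r, n, d);        /* truncating: q = -14, r = -2 */
        mpz_mul(chk, q, d);
        mpz_add(chk, chk, r);
        assert(mpz_cmp(chk, n) == 0);   /* n == q*d + r always holds */
        gmp_printf("tdiv: q=%Zd r=%Zd\n", q, r);

        mpz_fdiv_qr(q, r, n, d);        /* flooring: q = -15, r = 5 */
        gmp_printf("fdiv: q=%Zd r=%Zd\n", q, r);

        mpz_clears(n, d, q, r, chk, NULL);
        return 0;
    }
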
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/
igt_spinner.c
    72  const struct i915_request *rq)
    74  return hws->node.start + seqno_offset(rq->fence.context);
    78  struct i915_request *rq,
    84  err = i915_request_await_object(rq, vma->obj,
    87  err = i915_vma_move_to_active(vma, rq, flags);
    99  struct i915_request *rq = NULL;  (local)
   126  rq = intel_context_create_request(ce);
   127  if (IS_ERR(rq)) {
   128  err = PTR_ERR(rq);
   132  err = move_to_active(vma, rq, 0)
    [all...]
i915_gem.c
    31  struct i915_request *rq;  (local)
    33  rq = intel_context_create_request(ce);
    34  if (IS_ERR(rq)) {
    35  err = PTR_ERR(rq);
    39  i915_request_add(rq);
|
i915_perf.c
    77  static int write_timestamp(struct i915_request *rq, int slot)
    82  cs = intel_ring_begin(rq, 6);
    87  if (INTEL_GEN(rq->i915) >= 8)
    99  intel_ring_advance(rq, cs);
   104  static ktime_t poll_status(struct i915_request *rq, int slot)
   106  while (!intel_read_status_page(rq->engine, slot) &&
   107  !i915_request_completed(rq))
   117  struct i915_request *rq;  (local)
   140  rq = intel_engine_create_kernel_request(stream->engine);
   141  if (IS_ERR(rq)) {
    [all...]
i915_active.c
   105  struct i915_request *rq;  (local)
   107  rq = intel_engine_create_kernel_request(engine);
   108  if (IS_ERR(rq)) {
   109  err = PTR_ERR(rq);
   113  err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
   117  err = i915_active_add_request(&active->base, rq);
   118  i915_request_add(rq);
|
/src/external/gpl2/lvm2/dist/daemons/cmirrord/
local.c
    80  * @rq: the newly allocated request from kernel
    83  * If there is no request from the kernel, *rq is NULL.
    90  static int kernel_recv(struct clog_request **rq)
    97  *rq = NULL;
   163  // *rq = container_of(u_rq, struct clog_request, u_rq);
   164  *rq = (void *)u_rq -
   169  memset(*rq, 0, (void *)u_rq - (void *)(*rq));
   178  *rq = NULL;
   216  * Any processing errors are placed in the 'rq'
   226  struct clog_request *rq;  (local)
    [all...]
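
The commented-out line 163 and the open-coded pointer arithmetic below it recover the enclosing clog_request from its embedded u_rq member, the classic container_of idiom. A self-contained illustration with hypothetical stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    /* Generic container_of, as in the commented-out line 163. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct u_request {              /* hypothetical stand-in for u_rq's type */
        int request_type;
    };

    struct clog_request_demo {      /* hypothetical stand-in for clog_request */
        long private_state;
        struct u_request u_rq;      /* embedded member handed to the kernel */
    };

    int main(void)
    {
        struct clog_request_demo req = { .private_state = 42 };
        struct u_request *u_rq = &req.u_rq;

        /* Recover the outer struct from the pointer to its member. */
        struct clog_request_demo *rq =
            container_of(u_rq, struct clog_request_demo, u_rq);

        printf("recovered private_state = %ld\n", rq->private_state);
        return rq == &req ? 0 : 1;
    }
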
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_job.c
   166  ring = to_amdgpu_ring(entity->rq->sched);
   190  struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
   265  struct drm_sched_rq *rq = &sched->sched_rq[i];  (local)
   267  if (!rq)
   270  spin_lock(&rq->lock);
   271  list_for_each_entry(s_entity, &rq->entities, list) {
   280  spin_unlock(&rq->lock);
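
Lines 265-280 walk every entity of every scheduler run queue while holding that queue's lock. A reduced user-space analog of the same locked traversal, using a pthread mutex in place of the kernel spinlock and hypothetical types:

    #include <pthread.h>
    #include <stdio.h>

    struct entity_demo {            /* hypothetical drm_sched_entity analog */
        const char *name;
        struct entity_demo *next;
    };

    struct rq_demo {                /* hypothetical drm_sched_rq analog */
        pthread_mutex_t lock;
        struct entity_demo *entities;
    };

    /* Visit each entity of each run queue under that queue's lock. */
    static void for_each_entity(struct rq_demo *rqs, int n)
    {
        for (int i = 0; i < n; i++) {
            struct rq_demo *rq = &rqs[i];

            pthread_mutex_lock(&rq->lock);
            for (struct entity_demo *e = rq->entities; e; e = e->next)
                printf("rq %d: entity %s\n", i, e->name);
            pthread_mutex_unlock(&rq->lock);
        }
    }

    int main(void)
    {
        struct entity_demo e1 = { "kernel", NULL };
        struct rq_demo rqs[2] = {
            { PTHREAD_MUTEX_INITIALIZER, &e1 },
            { PTHREAD_MUTEX_INITIALIZER, NULL },
        };

        for_each_entity(rqs, 2);
        return 0;
    }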
|
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/i2c/
nouveau_nvkm_subdev_i2c_base.c
   138  u32 hi, lo, rq, tx;  (local)
   143  i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
   144  if (!hi && !lo && !rq && !tx)
   151  if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
|
/src/sys/external/bsd/drm2/dist/drm/scheduler/
sched_entity.c
    69  entity->rq = NULL;
    77  entity->rq = &sched_list[0]->sched_rq[entity->priority];
   100  assert_spin_locked(&entity->rq->sched->job_list_lock);
   128  * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
   132  * Return the pointer to the rq with least load.
   137  struct drm_sched_rq *rq = NULL;  (local)
   152  rq = &entity->sched_list[i]->sched_rq[entity->priority];
   156  return rq;
   181  if (!entity->rq)
   184  sched = entity->rq->sched
   494  struct drm_sched_rq *rq;  (local)
    [all...]
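
drm_sched_entity_get_free_sched (documented at lines 128-132) scans entity->sched_list and returns the run queue carrying the least load (the driver compares per-scheduler job counts read atomically). A reduced sketch of that selection, with hypothetical stand-in types:

    #include <stdio.h>

    struct sched_rq_demo {          /* hypothetical drm_sched_rq analog */
        const char *name;
        unsigned int load;          /* the driver uses an atomic job count */
    };

    /* Return the least-loaded run queue from a NULL-terminated list. */
    static struct sched_rq_demo *pick_free_rq(struct sched_rq_demo **list)
    {
        struct sched_rq_demo *best = NULL;

        for (unsigned int i = 0; list[i]; i++)
            if (!best || list[i]->load < best->load)
                best = list[i];
        return best;
    }

    int main(void)
    {
        struct sched_rq_demo a = { "gfx0", 3 }, b = { "gfx1", 1 };
        struct sched_rq_demo *list[] = { &a, &b, NULL };

        printf("picked %s\n", pick_free_rq(list)->name);    /* gfx1 */
        return 0;
    }
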
/src/external/lgpl3/gmp/dist/mpn/generic/
hgcd2.c
   415  mp_double_limb_t rq = div1 (n1, d1);  (local)
   416  if (UNLIKELY (rq.d1 > d1))
   450  n1 = rq.d0;
   451  q = rq.d1;
   663  mp_double_limb_t rq = div1 (ah, bh);  (local)
   664  mp_limb_t q = rq.d1;
   665  ah = rq.d0;
   693  mp_double_limb_t rq = div1 (bh, ah);  (local)
   694  mp_limb_t q = rq.d1;
   695  bh = rq.d0
    [all...]
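
div1 returns remainder and quotient packed into one mp_double_limb_t (d0 = remainder, d1 = quotient, per lines 450-451 and 663-665), so the HGCD inner loop gets both from a single call. A plain-C analog of the call shape, using hardware division where GMP's div1 may instead use a shift-and-subtract loop:

    #include <stdint.h>
    #include <stdio.h>

    /* Analog of mp_double_limb_t as hgcd2.c uses it. */
    struct double_limb {
        uint64_t d0;    /* remainder */
        uint64_t d1;    /* quotient */
    };

    /* Analog of div1(): one call yields both quotient and remainder. */
    static struct double_limb div1_demo(uint64_t n, uint64_t d)
    {
        struct double_limb rq = { n % d, n / d };
        return rq;
    }

    int main(void)
    {
        uint64_t ah = 1000003, bh = 997;

        /* Same shape as the call sites at lines 663-665. */
        struct double_limb rq = div1_demo(ah, bh);
        uint64_t q = rq.d1;
        ah = rq.d0;

        printf("q=%llu remainder=%llu\n",
               (unsigned long long)q, (unsigned long long)ah);
        return 0;
    }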