/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_context_types.h |
   46  struct intel_engine_cs *inflight; member in struct:intel_context
   47  #define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
   48  #define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
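
These two macros pack a small counter into the low bits of the engine pointer: struct intel_engine_cs is aligned well past 4 bytes, so the bottom 2 bits are free, and ptr_mask_bits() recovers the pointer while ptr_unmask_bits() recovers the count. A minimal standalone sketch of the trick (the kernel's own helpers live in i915_utils.h; these reimplementations are for illustration only):

    #include <stdint.h>

    /* Recover the pointer by clearing the low `bits` tag bits. */
    #define ptr_mask_bits(ptr, bits) \
            ((void *)((uintptr_t)(ptr) & ~(uintptr_t)((1UL << (bits)) - 1)))

    /* Recover the counter stored in the low `bits` tag bits. */
    #define ptr_unmask_bits(ptr, bits) \
            ((uintptr_t)(ptr) & (uintptr_t)((1UL << (bits)) - 1))

Two tag bits suffice here because a context can occupy only a couple of execution ports at once (EXECLIST_MAX_PORTS is 2 in this tree), so the count stays small.
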
intel_lrc.c |
  454  * If the inflight context did not trigger the preemption, then maybe
 1290  old = READ_ONCE(ce->inflight);
 1293  WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
 1296  } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
 1358  old = READ_ONCE(ce->inflight);
 1361  while (!try_cmpxchg(&ce->inflight, &old, cur));
 1628  const struct intel_engine_cs *inflight; local in function:virtual_matches
 1638  * while the previous virtualized request is inflight (so
 1642  inflight = intel_context_inflight(&ve->context);
 1643  if (inflight && inflight != engine
 [all...]
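
The READ_ONCE/try_cmpxchg pairs at lines 1290-1296 and 1358-1361 are lock-free update loops on that tagged pointer: schedule-in bumps the count held in the low bits, schedule-out decrements it and clears the pointer when the last reference goes away. A portable sketch of the same shape, using C11 atomics in place of the kernel's READ_ONCE/try_cmpxchg/ptr_inc helpers (illustrative only):

    #include <stdatomic.h>
    #include <stdint.h>

    #define TAG_BITS 2
    #define TAG_MASK ((uintptr_t)((1u << TAG_BITS) - 1))

    /* schedule-in: retry until the count in the low bits is bumped
     * (the first-submission path, which installs the pointer itself,
     * is omitted here). */
    static void tagged_inc(_Atomic uintptr_t *slot)
    {
            uintptr_t old = atomic_load(slot);

            while (!atomic_compare_exchange_weak(slot, &old, old + 1))
                    ;       /* `old` is reloaded on failure */
    }

    /* schedule-out: decrement the count if non-zero, otherwise this
     * was the last reference, so clear the whole word. */
    static uintptr_t tagged_dec(_Atomic uintptr_t *slot)
    {
            uintptr_t old = atomic_load(slot);
            uintptr_t cur;

            do {
                    cur = (old & TAG_MASK) ? old - 1 : 0;
            } while (!atomic_compare_exchange_weak(slot, &old, cur));

            return cur;
    }
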
intel_engine_types.h |
  194  * @inflight: the set of contexts submitted and acknowledged by HW
  196  * The set of inflight contexts is managed by reading CS events
  199  * advance our inflight/active tracking accordingly.
  201  struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */]; member in struct:intel_engine_execlists
  206  * promote them to the inflight array once HW has signaled the
  233  * However, since we may have recorded the priority of an inflight
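
Line 201 sizes the inflight array with one extra slot so it is always NULL-terminated, letting walkers iterate without carrying a length. A small sketch of the convention (struct names and MAX_PORTS are illustrative stand-ins, not the kernel's definitions):

    #define MAX_PORTS 2

    struct request;

    struct port_queue {
            /* +1 leaves room for the NULL sentinel */
            struct request *inflight[MAX_PORTS + 1];
    };

    static unsigned int
    count_inflight(const struct port_queue *q)
    {
            struct request *const *port;
            unsigned int n = 0;

            for (port = q->inflight; *port; port++)
                    n++;

            return n;
    }
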
intel_engine_cs.c |
  490  memset(execlists->inflight, 0, sizeof(execlists->inflight));
/src/sys/external/bsd/drm2/dist/drm/i915/gt/uc/ |
intel_guc_submission.c |
  274  * Currently we are not tracking the rq->context being inflight
  275  * (ce->inflight = rq->engine). It is only used by the execlists
  277  * required if we generalise the inflight tracking.
  295  struct i915_request **first = execlists->inflight;
  312  * We write directly into the execlists->inflight queue and don't use
  328  port - execlists->inflight);
  345  *port = schedule_in(last, port - execlists->inflight);
  349  execlists->active = execlists->inflight;
  361  for (port = execlists->inflight; (rq = *port); port++) {
  367  if (port != execlists->inflight) {
  [all...]
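
The loop at line 361 and the check at line 367 are the GuC back end's completion pass: finished requests are popped off the head of the NULL-terminated inflight queue, and any still-running tail is slid back to the front. A hypothetical condensed version, reusing the port_queue types sketched above (request_completed() and schedule_out() are assumed stand-ins for the driver's helpers):

    #include <stdbool.h>
    #include <string.h>

    static bool request_completed(const struct request *rq);  /* assumed */
    static void schedule_out(struct request *rq);             /* assumed */

    static void
    retire_completed(struct port_queue *q)
    {
            struct request **port, *rq;

            /* pop completed requests off the head of the queue */
            for (port = q->inflight; (rq = *port); port++) {
                    if (!request_completed(rq))
                            break;
                    schedule_out(rq);
            }

            /* slide the running tail (and the sentinel) to the front */
            if (port != q->inflight) {
                    size_t idx = port - q->inflight;
                    size_t rem = MAX_PORTS + 1 - idx;

                    memmove(q->inflight, port, rem * sizeof(*port));
            }
    }
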
/src/sys/dev/ata/ |
wdvar.h |
  104  unsigned inflight; member in struct:wd_softc
wd.c |
  327  wd->inflight = 0;
  795  wd->inflight++;
  826  if (wd->inflight >= openings) {
 1047  wd->inflight--;
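
Together these four hits form a queue-depth throttle: the counter starts at zero, is bumped when a transfer is handed to the controller, gates further submissions once it reaches the controller's openings, and drops on completion. A minimal sketch of the pattern (names are illustrative; the real driver also serializes these updates under its softc lock):

    struct disk_sc {
            unsigned inflight;      /* commands issued, not yet done */
            unsigned openings;      /* controller queue depth */
    };

    /* returns 1 if a transfer was started, 0 if the queue is full */
    static int
    disk_start_xfer(struct disk_sc *sc)
    {
            if (sc->inflight >= sc->openings)
                    return 0;       /* wait for a completion */
            sc->inflight++;
            /* ... hand the transfer to the controller here ... */
            return 1;
    }

    static void
    disk_xfer_done(struct disk_sc *sc)
    {
            sc->inflight--;         /* a slot opened; restart the queue */
    }
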
/src/sys/external/bsd/drm2/dist/drm/i915/ |
i915_scheduler.c |
  275  const struct i915_request *inflight; local in function:kick_submission
  287  inflight = execlists_active(&engine->execlists);
  288  if (!inflight)
  295  if (inflight->context == rq->context)
  299  if (need_preempt(prio, rq_prio(inflight)))
  519  * As we do not allow WAIT to preempt inflight requests,
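
The kick_submission() hits show the scheduler's early-out logic: a priority bump only reschedules the submission tasklet if it could actually preempt what the hardware is running. A condensed sketch of that decision (the bool-returning wrapper is hypothetical; execlists_active, need_preempt and rq_prio are the functions quoted above):

    static bool
    needs_kick(struct intel_engine_cs *engine,
               const struct i915_request *rq, int prio)
    {
            const struct i915_request *inflight;

            inflight = execlists_active(&engine->execlists);
            if (!inflight)
                    return false;   /* nothing executing to preempt */

            if (inflight->context == rq->context)
                    return false;   /* already running this context */

            return need_preempt(prio, rq_prio(inflight));
    }
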
/src/sys/netinet/ |
sctp_uio.h |
  464  u_int32_t inflight; /* flightsize in k */ member in struct:sctp_cwnd_args
sctputil.c |
  212  sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
  228  sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
/src/sys/dev/ic/ |
stireg.h |
  287  uint16_t inflight; /* possible on pci */ member in struct:sti_einitin
/src/sys/dev/pci/qat/ |
qat.c |
  878  /* Share inflight counter with rx and tx */
 1139  uint32_t inflight; local in function:qat_etr_put_msg
 1144  inflight = atomic_inc_32_nv(qr->qr_inflight);
 1145  if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
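
qat_etr_put_msg() admits a message optimistically: atomic_inc_32_nv() bumps the shared counter and returns the new value, and if that exceeds the ring's capacity the submission must be refused. A sketch of the pattern using NetBSD's atomic ops (the undo-and-EBUSY failure path is the conventional shape, assumed here; max_inflight stands in for QAT_MAX_INFLIGHTS(ring_size, msg_size)):

    #include <sys/atomic.h>
    #include <sys/errno.h>

    static int
    ring_reserve_slot(volatile uint32_t *counter, uint32_t max_inflight)
    {
            uint32_t inflight;

            inflight = atomic_inc_32_nv(counter);  /* returns new value */
            if (inflight > max_inflight) {
                    atomic_dec_32(counter);        /* undo: ring is full */
                    return EBUSY;
            }

            return 0;
    }
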