/src/sys/external/bsd/drm2/dist/drm/i915/gt/selftests/ |
mock_timeline.c | 24 INIT_LIST_HEAD(&timeline->requests);
|
/src/sys/external/bsd/drm2/dist/drm/i915/ |
i915_scheduler.h |
    19 for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
    20 list_for_each_entry(it, &(plist)->requests[idx], sched.link)
    27 &(plist)->requests[idx], \
|
i915_priolist_types.h |
    42 * Requests containing performance queries must not be preempted by
    55 struct list_head requests[I915_PRIORITY_COUNT]; member in struct:i915_priolist
|
i915_scheduler.c |
    76 for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
    77 if (list_empty(&p->requests[i]))
    188 * Requests will then be executed in fifo, and schedule
    191 * requests, so if userspace lied about their
    200 for (i = 0; i < ARRAY_SIZE(p->requests); i++)
    201 INIT_LIST_HEAD(&p->requests[i]);
    215 return &p->requests[idx];
    344 * end result is a topological list of requests in reverse order, the
    519 * As we do not allow WAIT to preempt inflight requests,
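
The i915_scheduler.h, i915_priolist_types.h and i915_scheduler.c hits above all revolve around the same structure: struct i915_priolist keeps one request list head per priority level, every head is initialized with INIT_LIST_HEAD(), and consumers walk the requests[] array looking for non-empty buckets. Below is a minimal user-space sketch of that pattern, not the kernel code; the bucket count, the tiny list_head helpers and main() are assumptions made so the example compiles on its own.

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define I915_PRIORITY_COUNT 4                 /* assumed bucket count */

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Simplified i915_priolist: one request list per priority level. */
struct i915_priolist {
	struct list_head requests[I915_PRIORITY_COUNT];
};

int main(void)
{
	struct i915_priolist p;
	struct list_head rq_link;             /* stands in for a request's sched.link */
	size_t i;

	for (i = 0; i < ARRAY_SIZE(p.requests); i++)
		INIT_LIST_HEAD(&p.requests[i]);       /* cf. i915_scheduler.c:200-201 */

	list_add_tail(&rq_link, &p.requests[2]);      /* queue one request at priority 2 */

	for (i = 0; i < ARRAY_SIZE(p.requests); i++) {
		if (list_empty(&p.requests[i]))       /* cf. i915_scheduler.c:76-77 */
			continue;
		printf("first non-empty bucket: %zu\n", i);
		break;
	}
	return 0;
}
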
|
i915_request.c |
    256 &i915_request_timeline(rq)->requests));
    260 * We only loosely track inflight requests across preemption,
    307 tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
    370 * requests that we have unsubmitted from HW, but left running
    421 list_move_tail(&request->sched.link, &engine->active.requests);
    560 list_for_each_entry_safe(rq, rn, &tl->requests, link)
    570 if (list_empty(&tl->requests))
    577 rq = list_first_entry(&tl->requests, typeof(*rq), link);
    586 rq = list_last_entry(&tl->requests, typeof(*rq), link);
    589 /* Retire our old requests in the hope that we free some * [all...]
|
/src/sys/external/bsd/drm2/dist/drm/i915/gt/uc/ |
intel_guc_ct.c |
    45 spin_lock_init(&ct->requests.lock);
    46 INIT_LIST_HEAD(&ct->requests.pending);
    47 INIT_LIST_HEAD(&ct->requests.incoming);
    48 INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
    194 spin_lock_destroy(&ct->requests.lock);
    276 return ++ct->requests.next_fence;
    500 spin_lock_irqsave(&ct->requests.lock, flags);
    501 list_add_tail(&request.link, &ct->requests.pending);
    502 spin_unlock_irqrestore(&ct->requests.lock, flags);
    535 spin_lock_irqsave(&ct->requests.lock, flags) [all...]
intel_guc_ct.h |
    44 * for the H2G and G2H requests sent and received through the buffers.
    56 spinlock_t lock; /* protects pending requests list */
    57 struct list_head pending; /* requests waiting for response */
    59 struct list_head incoming; /* incoming requests */
    60 struct work_struct worker; /* handler for incoming requests */
    61 } requests; member in struct:intel_guc_ct
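
The intel_guc_ct.c and intel_guc_ct.h hits describe how the GuC command-transport channel tracks its requests: a nested requests struct with a spinlock guarding a pending list (requests waiting for a response), an incoming list drained by a work item, and a fence counter bumped for every send. The sketch below is a rough user-space approximation of the queuing side only; the pthread mutex, the tiny list helpers and main() stand in for kernel facilities, and the incoming-request worker is omitted.

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *prev, *next; };
static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Shape of the bookkeeping the intel_guc_ct.h hits show: a lock, a pending
 * list, an incoming list and a fence counter.  A pthread mutex stands in
 * for the kernel spinlock. */
struct ct_requests {
	pthread_mutex_t lock;         /* protects pending requests list */
	struct list_head pending;     /* requests waiting for response */
	struct list_head incoming;    /* incoming requests */
	unsigned int next_fence;      /* cf. intel_guc_ct.c:276 */
};

struct ct_request { struct list_head link; unsigned int fence; };

static unsigned int ct_queue_pending(struct ct_requests *r, struct ct_request *req)
{
	pthread_mutex_lock(&r->lock);           /* cf. intel_guc_ct.c:500-502 */
	req->fence = ++r->next_fence;
	list_add_tail(&req->link, &r->pending);
	pthread_mutex_unlock(&r->lock);
	return req->fence;
}

int main(void)
{
	struct ct_requests r = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct ct_request req;

	INIT_LIST_HEAD(&r.pending);             /* cf. intel_guc_ct.c:45-48 */
	INIT_LIST_HEAD(&r.incoming);
	printf("queued with fence %u\n", ct_queue_pending(&r, &req));
	return 0;
}
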
|
/src/usr.sbin/lpr/lpq/ |
lpq.c |
    69 int requests; /* # of spool requests */ variable in typeref:typename:int
    121 if (requests >= MAXREQUESTS)
    122 fatal("too many requests");
    123 requ[requests++] = atoi(*argv);
|
/src/usr.sbin/lpr/lprm/ |
lprm.c |
    75 int requests; /* # of spool requests */ variable in typeref:typename:int
    140 if (requests >= MAXREQUESTS)
    141 fatal("Too many requests");
    142 requ[requests++] = atoi(arg);
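
lpq, lprm and (further down) lpd share the same idiom for job numbers given on the command line: a global requests counter, a fixed-size requ[] array, a hard cap at MAXREQUESTS, and atoi() on each numeric argument. A self-contained user-space sketch of that idiom follows; the MAXREQUESTS value and the fatal() helper here are stand-ins, the real definitions live in the lpr sources.

#include <stdio.h>
#include <stdlib.h>

#define MAXREQUESTS 50          /* assumed cap; the real value comes from lpr's headers */

static int requ[MAXREQUESTS];   /* job numbers to operate on */
static int requests;            /* # of spool requests collected so far */

static void fatal(const char *msg)
{
	fprintf(stderr, "lprm: %s\n", msg);
	exit(1);
}

int main(int argc, char **argv)
{
	/* Collect each numeric argument, refusing to overrun requ[],
	 * as in lprm.c:140-142 and lpq.c:121-123. */
	for (int i = 1; i < argc; i++) {
		if (requests >= MAXREQUESTS)
			fatal("Too many requests");
		requ[requests++] = atoi(argv[i]);
	}

	for (int i = 0; i < requests; i++)
		printf("request %d: job %d\n", i, requ[i]);
	return 0;
}
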
|
/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_gt_requests.c |
    29 list_for_each_entry_safe(rq, rn, &tl->requests, link)
    198 /* If the device is asleep, we have no requests outstanding */
    214 container_of(work, typeof(*gt), requests.retire_work.work);
    216 schedule_delayed_work(&gt->requests.retire_work,
    223 INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
    228 cancel_delayed_work(&gt->requests.retire_work);
    233 schedule_delayed_work(&gt->requests.retire_work,
    240 cancel_delayed_work_sync(&gt->requests.retire_work);
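
intel_gt_requests.c shows the self-rearming delayed-work pattern for request retirement: the handler retires what it can and then reschedules itself, while park/suspend paths cancel the work. The sketch below only mimics the shape of that pattern with single-threaded stand-ins; the kernel's INIT_DELAYED_WORK, schedule_delayed_work and cancel_delayed_work_sync take more arguments and do real deferred queuing.

#include <stdbool.h>
#include <stdio.h>

/* Toy delayed-work API: only records whether work is queued. */
struct delayed_work { void (*fn)(struct delayed_work *); bool queued; };

static void INIT_DELAYED_WORK(struct delayed_work *w, void (*fn)(struct delayed_work *))
{
	w->fn = fn;
	w->queued = false;
}

static void schedule_delayed_work(struct delayed_work *w)
{
	w->queued = true;       /* the kernel version also takes a delay */
}

static void cancel_delayed_work_sync(struct delayed_work *w)
{
	w->queued = false;
}

/* cf. intel_gt_requests.c:214-216: retire outstanding requests, then
 * reschedule ourselves so retirement keeps ticking while the GT is busy. */
static void retire_work_handler(struct delayed_work *work)
{
	printf("retiring requests\n");
	schedule_delayed_work(work);
}

int main(void)
{
	struct delayed_work retire_work;

	INIT_DELAYED_WORK(&retire_work, retire_work_handler);   /* cf. line 223 */
	schedule_delayed_work(&retire_work);                     /* cf. lines 216/233 */
	if (retire_work.queued)
		retire_work.fn(&retire_work);                    /* run one tick by hand */
	cancel_delayed_work_sync(&retire_work);                  /* cf. line 240 */
	return 0;
}
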
|
intel_timeline_types.h |
    29 struct mutex mutex; /* protects the flow of requests */
    33 * How many requests are in flight or may be under construction.
    38 * context so that we can issue requests at any time without having
    59 * List of breadcrumbs associated with GPU requests currently
    62 struct list_head requests; member in struct:intel_timeline
|
intel_gt_types.h |
    52 * but this means that requests will finish and never
    55 * fires, go retire requests.
    58 } requests; member in struct:intel_gt
    70 * userspace requests? Whilst idle, we allow runtime power
|
intel_ring.c |
    195 GEM_BUG_ON(list_empty(&tl->requests));
    196 list_for_each_entry(target, &tl->requests, link) {
    206 if (GEM_WARN_ON(&target->link == &tl->requests))
|
intel_gt.c |
    399 struct i915_request *requests[I915_NUM_ENGINES] = {}; local in function:__engines_record_defaults
    447 requests[id] = i915_request_get(rq);
    460 for (id = 0; id < ARRAY_SIZE(requests); id++) {
    465 rq = requests[id];
    523 for (id = 0; id < ARRAY_SIZE(requests); id++) {
    527 rq = requests[id];
|
/src/lib/libpuffs/ |
Makefile | 13 framebuf.c null.c opdump.c paths.c pnode.c requests.c \
|
/src/usr.sbin/lpr/common_source/ |
rmjob.c |
    65 extern int requests; /* # of spool requests */
    255 if (users == 0 && requests == 0)
    262 for (r = requ; r < &requ[requests]; r++)
    318 for (i = 0; i < requests; i++) {
    335 for (i = 0; i < requests; i++) {
|
displayq.c |
    68 extern int requests; /* # of spool requests */
    224 i < requests && (size_t)(cp - line + 11) < sizeof(line) - 2;
    369 if (users == 0 && requests == 0)
    382 for (r = requ; r < &requ[requests]; r++)
|
/src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/ |
pxa25x.dtsi | 43 #dma-requests = <40>;
|
spear13xx.dtsi |
    109 dma-requests = <32>;
    124 dma-requests = <32>;
|
stm32h743.dtsi |
    269 dma-requests = <8>;
    287 dma-requests = <8>;
    296 dma-requests = <128>;
    370 dma-requests = <32>;
|
artpec6.dtsi |
    262 dma-requests = <8>;
    284 dma-requests = <8>;
|
dm814x.dtsi |
    171 #dma-requests = <256>;
    468 dma-requests = <32>;
    573 dma-requests = <64>;
|
pxa27x.dtsi | 17 #dma-requests = <75>;
|
/src/usr.sbin/lpr/lpd/ |
lpd.c |
    120 int lflag; /* log requests flag */
    418 int requests; /* # of spool requests */ variable in typeref:typename:int
    453 syslog(LOG_INFO, "%s requests %s %s",
    495 if (requests >= MAXREQUESTS)
    496 fatal("Too many requests");
    497 requ[requests++] = atoi(cp);
    531 if (requests >= MAXREQUESTS)
    532 fatal("Too many requests");
    533 requ[requests++] = atoi(cp) [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
i915_request.c |
    289 struct i915_request **requests; local in function:__igt_breadcrumbs_smoketest
    297 * At its heart, we simply create oodles of requests running across
    303 requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
    304 if (!requests)
    354 requests[n] = i915_request_get(rq);
    376 struct i915_request *rq = requests[count - 1];
    391 struct i915_request *rq = requests[n];
    420 kfree(requests);
    439 * Smoketest our breadcrumb/signal handling for requests across multipl [all...]
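
The breadcrumbs smoketest above builds a dynamically sized array of request pointers with kcalloc(), keeps a reference in each slot as requests are created, then walks the array and releases everything. A trivial user-space analogue of that bookkeeping, using calloc()/malloc()/free() and a made-up fake_request type with an assumed total, is sketched here:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct i915_request; just enough state to demonstrate. */
struct fake_request { int id; };

int main(void)
{
	const size_t total = 8;                 /* assumed; the selftest computes its own total */
	struct fake_request **requests;
	size_t n;

	/* cf. selftests/i915_request.c:303-304: one pointer slot per request,
	 * zero-initialized so unfilled slots are easy to spot. */
	requests = calloc(total, sizeof(*requests));
	if (!requests)
		return 1;

	for (n = 0; n < total; n++) {
		requests[n] = malloc(sizeof(*requests[n]));   /* cf. line 354: keep one per slot */
		if (!requests[n])
			break;
		requests[n]->id = (int)n;
	}

	for (size_t i = 0; i < n; i++) {        /* cf. lines 376/391: walk what we kept */
		printf("request %zu: id %d\n", i, requests[i]->id);
		free(requests[i]);
	}
	free(requests);                          /* cf. line 420 */
	return 0;
}
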