/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
igt_spinner.h | 25 u32 *batch; member in struct:igt_spinner
|
igt_spinner.c |
    52 spin->batch = vaddr;
    102 u32 *batch; local in function:igt_spinner_create_request
    140 batch = spin->batch;
    143 *batch++ = MI_STORE_DWORD_IMM_GEN4;
    144 *batch++ = lower_32_bits(hws_address(hws, rq));
    145 *batch++ = upper_32_bits(hws_address(hws, rq));
    147 *batch++ = MI_STORE_DWORD_IMM_GEN4;
    148 *batch++ = 0;
    149 *batch++ = hws_address(hws, rq) [all...]
i915_request.c |
    537 * (individually), and wait for the batch to complete. We can check
    654 struct i915_vma *batch)
    664 batch->node.start,
    665 batch->node.size,
    681 struct i915_vma *batch; local in function:live_empty_request
    686 * (individually), and wait for the batch to complete. We can check
    690 batch = empty_batch(i915);
    691 if (IS_ERR(batch))
    692 return PTR_ERR(batch);
    707 request = empty_request(engine, batch);
    831 struct i915_vma *batch; local in function:live_all_engines
    965 struct i915_vma *batch; local in function:live_sequential_engines [all...]
i915_gem_gtt.c |
    1768 static u32 *spinner(u32 *batch, int i)
    1770 return batch + i * 64 / sizeof(*batch) + 4;
    1773 static void end_spin(u32 *batch, int i)
    1775 *spinner(batch, i) = MI_BATCH_BUFFER_END;
    1794 u32 *batch; local in function:igt_cs_tlb
    1799 * from scratch as it has not seen the batch move (due to missing
    1824 batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
    1825 if (IS_ERR(batch)) {
    1826 err = PTR_ERR(batch); [all...]
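
The spinner()/end_spin() helpers in the igt_cs_tlb hits are small enough to restate with comments; only the comments are added here, the arithmetic is exactly what the hits show (each emitted spin loop occupies 64 bytes of the batch, and the dword 4 entries into that slot is the one that gets patched to stop it):

static u32 *spinner(u32 *batch, int i)
{
	/* Address of the i-th 64-byte slot in the batch, skipping its first
	 * four dwords; this is the dword end_spin() overwrites. */
	return batch + i * 64 / sizeof(*batch) + 4;
}

static void end_spin(u32 *batch, int i)
{
	/* Plant a batch-buffer terminator so the i-th spin loop ends there. */
	*spinner(batch, i) = MI_BATCH_BUFFER_END;
}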
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/ |
igt_gem_utils.c |
    117 struct i915_vma *batch; local in function:igt_gpu_fill_dw
    124 batch = igt_emit_store_dw(vma, offset, count, val);
    125 if (IS_ERR(batch))
    126 return PTR_ERR(batch);
    139 batch->node.start, batch->node.size,
    144 i915_vma_lock(batch);
    145 err = i915_request_await_object(rq, batch->obj, false);
    147 err = i915_vma_move_to_active(batch, rq, 0);
    148 i915_vma_unlock(batch); [all...]
i915_gem_context.c |
    948 struct i915_vma *batch; local in function:emit_rpcs_query
    968 batch = rpcs_query_batch(vma);
    969 if (IS_ERR(batch)) {
    970 err = PTR_ERR(batch);
    981 batch->node.start, batch->node.size,
    986 i915_vma_lock(batch);
    987 err = i915_request_await_object(rq, batch->obj, false);
    989 err = i915_vma_move_to_active(batch, rq, 0);
    990 i915_vma_unlock(batch); [all...]
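
Both gem selftests above repeat the same idiom for tying a batch vma to the request that will execute it: lock the vma, order the request behind prior work on the backing object, mark the vma active on this request, unlock. A minimal sketch of that idiom; the wrapper name and the error-handling glue between lines 145/147 and 987/989 are assumptions, while the four calls are the ones visible in the hits:

static int move_batch_to_active(struct i915_request *rq, struct i915_vma *batch)
{
	int err;

	i915_vma_lock(batch);
	/* Wait for prior users of the batch object before this request runs. */
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		/* Track the vma as busy until this request retires. */
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);

	return err;
}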
/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_renderstate.h |
    37 const u32 *batch; member in struct:intel_renderstate_rodata
    44 .batch = gen ## _g ## _null_state_batch, \
|
selftest_hangcheck.c |
    57 u32 *batch; member in struct:hang
    100 h->batch = vaddr;
    147 u32 *batch; local in function:hang_create_request
    167 h->batch = vaddr;
    205 batch = h->batch;
    207 *batch++ = MI_STORE_DWORD_IMM_GEN4;
    208 *batch++ = lower_32_bits(hws_address(hws, rq));
    209 *batch++ = upper_32_bits(hws_address(hws, rq));
    210 *batch++ = rq->fence.seqno [all...]
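
The hang and spinner selftests (this entry and igt_spinner.c above) hand-emit the same four-dword sequence into the CPU mapping of the batch object: a store-dword command that makes the GPU write the request's seqno into a per-request slot of the hardware status page, which the test then polls. A sketch of that emission step; the wrapper function name is invented here, while the commands and operands are the ones shown in the hits:

static u32 *emit_hws_seqno(u32 *batch, struct i915_vma *hws, struct i915_request *rq)
{
	*batch++ = MI_STORE_DWORD_IMM_GEN4;              /* "store immediate dword" */
	*batch++ = lower_32_bits(hws_address(hws, rq));  /* destination: HWS slot, low 32 bits */
	*batch++ = upper_32_bits(hws_address(hws, rq));  /* destination: HWS slot, high 32 bits */
	*batch++ = rq->fence.seqno;                      /* value the GPU stores when it runs */
	return batch;
}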
selftest_workarounds.c |
    479 struct i915_vma *batch; local in function:check_dirty_whitelist
    487 batch = create_batch(ce->vm);
    488 if (IS_ERR(batch)) {
    489 err = PTR_ERR(batch);
    518 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
    567 i915_gem_object_flush_map(batch->obj);
    568 i915_gem_object_unpin_map(batch->obj);
    583 i915_vma_lock(batch);
    584 err = i915_request_await_object(rq, batch->obj, false);
    586 err = i915_vma_move_to_active(batch, rq, 0)
    819 struct i915_vma *batch; local in function:scrub_whitelisted_registers [all...]
selftest_lrc.c |
    2137 u64 offset = (*prev)->batch->node.start;
    2139 /* Terminate the spinner in the next lower priority batch. */
    2154 rq->batch = vma;
    2198 * the last batch which then percolates down the chain, each releasing
    2249 cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
    2252 i915_gem_object_unpin_map(rq->batch->obj);
    2511 struct drm_i915_gem_object *batch; member in struct:preempt_smoke
    2525 struct drm_i915_gem_object *batch)
    2531 if (batch) {
    2535 vma = i915_vma_instance(batch, vm, NULL) [all...]
/src/sys/external/bsd/drm2/include/linux/ |
shrinker.h | 49 size_t batch; member in struct:shrinker
|
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/tests/ |
sanitizer_quarantine_test.cc |
    39 while (QuarantineBatch *batch = cache->DequeueBatch()) local in function:__sanitizer::DeallocateCache
    40 cb.Deallocate(batch);
    53 ASSERT_EQ(into.batch[0], kFakePtr);
    54 ASSERT_EQ(into.batch[1], kFakePtr);
    62 // Merge the batch to the limit.
    116 // Batches merged, one batch to deallocate.
|
/src/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/ |
tsan_dense_alloc.h |
    112 T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_); local in function:__tsan::DenseSlabAlloc::Refill
    116 new(batch + i) T;
    117 *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
    119 *(IndexT*)(batch + kL2Size - 1) = 0;
    121 map_[fillpos_++] = batch;
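
The Refill hits show the usual slab-refill trick: map one block of kL2Size objects, then chain them through indices stored inside the still-free objects themselves, with 0 reserved as the end-of-list sentinel. A plain-C sketch of the same pattern, with illustrative names and sizes rather than TSan's:

#include <stdlib.h>
#include <string.h>

typedef unsigned int index_t;
#define L2_SIZE 1024                     /* illustrative, stands in for kL2Size */

/* Allocate one block of L2_SIZE objects of 'obj_size' bytes and thread a
 * free list through them: each free object starts with the index of the
 * next free object, and the last one holds 0 ("no more").  Assumes
 * obj_size >= sizeof(index_t). */
static void *refill_block(size_t obj_size, size_t block_no)
{
	char *batch = calloc(L2_SIZE, obj_size);
	if (batch == NULL)
		return NULL;

	for (size_t i = 0; i < L2_SIZE; i++) {
		index_t next = (i + 1 < L2_SIZE)
		    ? (index_t)(block_no * L2_SIZE + i + 1)
		    : 0;
		memcpy(batch + i * obj_size, &next, sizeof(next));
	}
	return batch;
}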
|
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_object_blt.c |
    27 struct i915_vma *batch; local in function:intel_emit_vma_fill_blt
    90 batch = i915_vma_instance(pool->obj, ce->vm, NULL);
    91 if (IS_ERR(batch)) {
    92 err = PTR_ERR(batch);
    96 err = i915_vma_pin(batch, 0, 0, PIN_USER);
    100 batch->private = pool;
    101 return batch;
    137 struct i915_vma *batch; local in function:i915_gem_object_fill_blt
    155 batch = intel_emit_vma_fill_blt(ce, vma, value);
    156 if (IS_ERR(batch)) {
    211 struct i915_vma *batch; local in function:intel_emit_vma_copy_blt
    325 struct i915_vma *vma[2], *batch; local in function:i915_gem_object_copy_blt [all...]
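
The tail of intel_emit_vma_fill_blt visible above (lines 90-101) turns a pooled batch object into something the GPU can execute: instantiate a vma for it in the context's address space, pin it with PIN_USER, and stash the pool node in batch->private so the caller can release it later. A hedged sketch of that shape; the function name is invented and the pool bookkeeping is reduced to a comment because the pool node type is not shown in the hits:

static struct i915_vma *
pin_batch_for_blt(struct intel_context *ce, struct drm_i915_gem_object *obj)
{
	struct i915_vma *batch;
	int err;

	/* Bind the (pooled) batch object into this context's address space. */
	batch = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(batch))
		return batch;

	/* Pin it for GPU use before the blitter request references it. */
	err = i915_vma_pin(batch, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	/* The real code also records the pool node in batch->private here. */
	return batch;
}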
i915_gem_client_blt.c |
    166 struct i915_vma *batch; local in function:clear_pages_worker
    184 batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
    185 if (IS_ERR(batch)) {
    186 err = PTR_ERR(batch);
    201 err = intel_emit_vma_mark_active(batch, rq);
    221 batch->node.start, batch->node.size,
    231 intel_emit_vma_release(w->ce, batch);
|
/src/usr.bin/mail/ |
mime_detach.c |
    62 int batch; member in struct:__anond51030100108
    71 detach_ctl.batch = value(ENAME_MIME_DETACH_BATCH) != NULL;
    72 detach_ctl.ask = detach_ctl.batch ? 0 : 1;
    97 if (!detach_ctl.batch) {
    146 detach_ctl.batch = 1;
    155 detach_ctl.batch = 1;
    237 detach_ctl.batch = 0;
    242 } while (!detach_ctl.batch);
|
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/ |
sanitizer_allocator_local_cache.h |
    169 void *res = c->batch[--c->count];
    170 PREFETCH(c->batch[c->count - 1]);
    184 c->batch[c->count++] = p;
    212 void *batch[2 * TransferBatch::kMaxNumCached]; member in struct:SizeClassAllocator32LocalCache::PerClass
    228 // id. 0 means the class size is large enough to store a batch within one
    249 b->CopyToArray(c->batch);
    260 class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    261 // Failure to allocate a batch while releasing memory is non recoverable.
    262 // TODO(alekseys): Figure out how to do it without allocating a new batch.
    265 "transfer batch.\n", SanitizerToolName) [all...]
sanitizer_quarantine.h |
    33 void *batch[kSize]; member in struct:__sanitizer::QuarantineBatch
    37 batch[0] = ptr;
    38 this->size = size + sizeof(QuarantineBatch); // Account for the batch size.
    41 // The total size of quarantined nodes recorded in this batch.
    48 batch[count++] = ptr;
    61 batch[count + i] = from->batch[i];
    166 // require some tuning). It saves us merge attempt when the batch list
    187 CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
    189 PREFETCH(b->batch[i]) [all...]
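
Taken together, the quarantine hits describe a simple container: a QuarantineBatch holds a fixed array of quarantined pointers plus a byte count that also charges the batch header itself (line 38), new pointers are appended (line 48), and two under-filled batches can be merged by copying one array after the other (line 61). A plain-C sketch of that container, with an illustrative capacity and field layout:

#include <stddef.h>

#define QBATCH_CAPACITY 1021            /* illustrative, stands in for kSize */

struct qbatch {
	size_t size;                    /* bytes quarantined, including this header */
	size_t count;                   /* pointers currently stored */
	void *batch[QBATCH_CAPACITY];
};

static void qbatch_init(struct qbatch *b, void *ptr, size_t bytes)
{
	b->count = 1;
	b->batch[0] = ptr;
	b->size = bytes + sizeof(*b);   /* account for the batch itself */
}

static void qbatch_push(struct qbatch *b, void *ptr, size_t bytes)
{
	/* Caller guarantees count < QBATCH_CAPACITY before pushing. */
	b->batch[b->count++] = ptr;
	b->size += bytes;
}

static void qbatch_merge(struct qbatch *into, const struct qbatch *from)
{
	/* Caller guarantees the combined count fits; mirrors line 61 above. */
	for (size_t i = 0; i < from->count; i++)
		into->batch[into->count + i] = from->batch[i];
	into->count += from->count;
	into->size += from->size - sizeof(*from);  /* don't charge the header twice */
}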
/src/games/boggle/boggle/ |
bog.c |
    126 static int batch; variable in typeref:typename:int
    141 batch = debug = reuse = selfuse = sflag = 0;
    149 batch = 1;
    199 if (batch && bspec == NULL)
    206 if (batch) {
|
/src/sys/external/bsd/drm/dist/shared-core/ |
i915_dma.c |
    486 drm_i915_batchbuffer_t * batch)
    489 struct drm_clip_rect __user *boxes = batch->cliprects;
    490 int nbox = batch->num_cliprects;
    494 if ((batch->start | batch->used) & 0x7) {
    506 batch->DR1, batch->DR4);
    515 OUT_RING(batch->start);
    518 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
    524 OUT_RING(batch->start | MI_BATCH_NON_SECURE)
    613 drm_i915_batchbuffer_t *batch = data; local in function:i915_batchbuffer [all...]
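
This legacy DRM path dispatches a user-supplied batchbuffer by chaining to it from the ring; the non-secure variants OR in MI_BATCH_NON_SECURE so the hardware treats the buffer as untrusted. Line 494 is the guard that rejects misaligned batches before anything is emitted. A sketch of just that guard (the wrapper name and the error code are assumptions; only the test itself comes from the hit):

static int validate_batch_alignment(const drm_i915_batchbuffer_t *batch)
{
	/* Start address and length must both be 8-byte aligned before the
	 * ring is asked to jump into the buffer (cf. line 494 above). */
	if ((batch->start | batch->used) & 0x7)
		return -EINVAL;         /* error code assumed */
	return 0;
}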
/src/sys/kern/ |
kern_runq.c |
    1037 int batch; local in function:sched_lwp_stats
    1050 batch = (l->l_rticksum > l->l_slpticksum);
    1051 if (batch != 0) {
    1053 batch = 0;
    1063 sched_pstats_hook(l, batch);
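
sched_lwp_stats classifies an LWP as "batch" (CPU-bound) when it accumulated more run ticks than sleep ticks over the statistics interval; line 1053 suggests the result is suppressed unless the condition has held before, and the outcome is handed to the scheduler hook at line 1063. A tiny illustration of the core predicate, detached from the kernel types (the names here are made up):

/* CPU-bound if the thread ran for more ticks than it slept since the
 * last statistics pass; interactive threads show the opposite pattern. */
static int classify_batch(unsigned int run_ticks, unsigned int sleep_ticks)
{
	return run_ticks > sleep_ticks;
}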
|
/src/sys/external/bsd/drm2/dist/drm/i915/ |
i915_request.h |
    270 /** Batch buffer related to this request if any (used for
    273 struct i915_vma *batch; member in struct:i915_request
|
i915_gpu_error.c |
    502 return __find_vma(ee->vma, "batch");
    508 struct i915_vma_coredump *batch; local in function:error_print_engine
    527 batch = find_batch(ee);
    528 if (batch) {
    529 u64 start = batch->gtt_offset;
    530 u64 end = start + batch->gtt_size;
    532 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
    1394 vma = capture_vma(vma, rq->batch, "batch", gfp);
|
/src/usr.sbin/lockstat/ |
main.c |
    573 const int batch = 32; local in function:morelocks
    576 l = (lock_t *)malloc(sizeof(*l) * batch);
    578 for (lp = l, max = l + batch; lp < max; lp++)
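
morelocks grows lockstat's lock pool 32 entries at a time: one malloc for the whole batch, then a loop over the fresh entries. The loop body is not shown in the hit, so the free-list linkage below is an assumption about what it does; the rest is a self-contained sketch of the pattern:

#include <stdlib.h>

struct lock_entry {
	struct lock_entry *next;
	/* ... per-lock statistics would live here ... */
};

static struct lock_entry *freelist;

static int morelocks_sketch(void)
{
	const int batch = 32;                   /* grow the pool 32 entries at a time */
	struct lock_entry *l, *lp, *max;

	l = malloc(sizeof(*l) * batch);
	if (l == NULL)
		return -1;
	for (lp = l, max = l + batch; lp < max; lp++) {
		lp->next = freelist;            /* push each new entry onto the free list */
		freelist = lp;
	}
	return 0;
}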
|
/src/sys/arch/hppa/hppa/ |
pmap.c |
    1495 int batch; local in function:pmap_remove
    1499 for (batch = 0; sva < eva; sva += PAGE_SIZE) {
    1505 batch = pdemask == sva && sva + PDE_SIZE <= eva;
    1519 if (!batch)
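
In pmap_remove, batch is set at line 1505 whenever the loop stands at the start of a page directory entry and the removal range extends past the end of that PDE; in that case the whole PDE's worth of mappings can be torn down without the per-page work guarded by "!batch" at line 1519. A small sketch of the predicate with an illustrative PDE span (the real hppa constant is not shown in the hits):

#include <stdbool.h>
#include <stdint.h>

#define PDE_SIZE (1UL << 22)             /* illustrative span of one PDE */
#define PDE_MASK (~(PDE_SIZE - 1))

/* True when the removal range [sva, eva) covers the entire PDE that
 * sva falls in, so per-page bookkeeping can be skipped for that PDE. */
static bool covers_whole_pde(uintptr_t sva, uintptr_t eva)
{
	uintptr_t pdemask = sva & PDE_MASK;

	return pdemask == sva && sva + PDE_SIZE <= eva;
}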
|