/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
igt_spinner.c |
    52  spin->batch = vaddr;
    102 u32 *batch;  local in function:igt_spinner_create_request
    140 batch = spin->batch;
    143 *batch++ = MI_STORE_DWORD_IMM_GEN4;
    144 *batch++ = lower_32_bits(hws_address(hws, rq));
    145 *batch++ = upper_32_bits(hws_address(hws, rq));
    147 *batch++ = MI_STORE_DWORD_IMM_GEN4;
    148 *batch++ = 0;
    149 *batch++ = hws_address(hws, rq)
    [all...]
igt_spinner.h | 25 u32 *batch; member in struct:igt_spinner
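
The igt_spinner hits above all build the same preamble: a CPU-mapped batch is filled with an MI_STORE_DWORD_IMM_GEN4 packet so the GPU writes the request's seqno into the hardware status page before spinning. A minimal sketch of that emission (the helper name emit_store_seqno is invented here; hws_address() and the MI_* opcode are taken from the snippets):

        /* Sketch: emit a "store seqno to HWS" packet into a CPU-mapped batch.
         * MI_STORE_DWORD_IMM_GEN4 is followed by a 64-bit address and the
         * dword value the GPU should store there. */
        static u32 *emit_store_seqno(u32 *batch, u64 hws_addr, u32 seqno)
        {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = lower_32_bits(hws_addr);     /* address, low dword */
                *batch++ = upper_32_bits(hws_addr);     /* address, high dword */
                *batch++ = seqno;                       /* value written by the GPU */
                return batch;                           /* advanced write pointer */
        }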
|
i915_request.c |
    537 * (individually), and wait for the batch to complete. We can check
    654 struct i915_vma *batch)
    664 batch->node.start,
    665 batch->node.size,
    681 struct i915_vma *batch;  local in function:live_empty_request
    686 * (individually), and wait for the batch to complete. We can check
    690 batch = empty_batch(i915);
    691 if (IS_ERR(batch))
    692 return PTR_ERR(batch);
    707 request = empty_request(engine, batch);
    831 struct i915_vma *batch;  local in function:live_all_engines
    965 struct i915_vma *batch;  local in function:live_sequential_engines
    [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/ |
igt_gem_utils.c |
    117 struct i915_vma *batch;  local in function:igt_gpu_fill_dw
    124 batch = igt_emit_store_dw(vma, offset, count, val);
    125 if (IS_ERR(batch))
    126 return PTR_ERR(batch);
    139 batch->node.start, batch->node.size,
    144 i915_vma_lock(batch);
    145 err = i915_request_await_object(rq, batch->obj, false);
    147 err = i915_vma_move_to_active(batch, rq, 0);
    148 i915_vma_unlock(batch);
    [all...]
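
The igt_gpu_fill_dw hits show the usual dance for tying a batch vma to a request before the engine's emit_bb_start is issued. A sketch of just that step (the wrapper name submit_batch_vma is hypothetical; the calls themselves are the ones listed above):

        /* Sketch: make a request depend on the batch object and keep the vma
         * resident until the request retires. */
        static int submit_batch_vma(struct i915_request *rq, struct i915_vma *batch)
        {
                int err;

                i915_vma_lock(batch);
                /* order rq after any prior writes to the batch object */
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        /* pin the vma's binding for the lifetime of rq */
                        err = i915_vma_move_to_active(batch, rq, 0);
                i915_vma_unlock(batch);

                return err;
        }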
/src/usr.bin/at/ |
Makefile |
    9  ${BINDIR}/at ${BINDIR}/batch
    10 MLINKS= at.1 batch.1 \
|
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_object_blt.c |
    27  struct i915_vma *batch;  local in function:intel_emit_vma_fill_blt
    90  batch = i915_vma_instance(pool->obj, ce->vm, NULL);
    91  if (IS_ERR(batch)) {
    92  err = PTR_ERR(batch);
    96  err = i915_vma_pin(batch, 0, 0, PIN_USER);
    100 batch->private = pool;
    101 return batch;
    137 struct i915_vma *batch;  local in function:i915_gem_object_fill_blt
    155 batch = intel_emit_vma_fill_blt(ce, vma, value);
    156 if (IS_ERR(batch)) {
    211 struct i915_vma *batch;  local in function:intel_emit_vma_copy_blt
    325 struct i915_vma *vma[2], *batch;  local in function:i915_gem_object_copy_blt
    [all...]
i915_gem_client_blt.c |
    166 struct i915_vma *batch;  local in function:clear_pages_worker
    184 batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
    185 if (IS_ERR(batch)) {
    186 err = PTR_ERR(batch);
    201 err = intel_emit_vma_mark_active(batch, rq);
    221 batch->node.start, batch->node.size,
    231 intel_emit_vma_release(w->ce, batch);
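
Both blt files above obtain their batch from intel_emit_vma_fill_blt(), which, per the object_blt.c hits, wraps a pooled object in a vma, pins it, and stashes the pool in batch->private so intel_emit_vma_release() can recycle it. A sketch of that tail end; the pool is left as an opaque pointer here because its type does not appear in the hits:

        /* Sketch only: turn a pooled GEM object into a pinned batch vma.
         * 'pool' is deliberately untyped for illustration; the driver passes
         * the engine buffer-pool node the object came from. */
        static struct i915_vma *wrap_pool_as_batch(struct intel_context *ce,
                                                   struct drm_i915_gem_object *obj,
                                                   void *pool)
        {
                struct i915_vma *batch;
                int err;

                batch = i915_vma_instance(obj, ce->vm, NULL);
                if (IS_ERR(batch))
                        return batch;

                err = i915_vma_pin(batch, 0, 0, PIN_USER);
                if (err)
                        return ERR_PTR(err);

                batch->private = pool;  /* reclaimed via intel_emit_vma_release() */
                return batch;
        }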
|
i915_gem_execbuffer.c |
    147  * Any render targets written to in the batch must be flagged with
    218  * Before any batch is given extra privileges we first must check that it
    240  struct i915_vma *batch; /** identity of the batch obj/vma */  member in struct:i915_execbuffer
    275  u32 batch_start_offset; /** Location within object of batch */
    276  u32 batch_len; /** Length of batch within object */
    519  * SNA is doing fancy tricks with compressing batch buffers, which leads
    521  * relocate address is still positive, except when the batch is placed
    534  eb->batch = vma;
    552  eb->batch = NULL
    743  unsigned int i, batch;  local in function:eb_lookup_vmas
    1155 struct i915_vma *batch;  local in function:__reloc_gpu_alloc
    1276 u32 *batch;  local in function:relocate_entry
    2006 struct i915_vma *batch;  member in struct:eb_parse_work
    [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_renderstate.h |
    37 const u32 *batch;  member in struct:intel_renderstate_rodata
    44 .batch = gen ## _g ## _null_state_batch, \
|
selftest_hangcheck.c |
    57  u32 *batch;  member in struct:hang
    100 h->batch = vaddr;
    147 u32 *batch;  local in function:hang_create_request
    167 h->batch = vaddr;
    205 batch = h->batch;
    207 *batch++ = MI_STORE_DWORD_IMM_GEN4;
    208 *batch++ = lower_32_bits(hws_address(hws, rq));
    209 *batch++ = upper_32_bits(hws_address(hws, rq));
    210 *batch++ = rq->fence.seqno
    [all...]
selftest_workarounds.c |
    479 struct i915_vma *batch;  local in function:check_dirty_whitelist
    487 batch = create_batch(ce->vm);
    488 if (IS_ERR(batch)) {
    489 err = PTR_ERR(batch);
    518 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
    567 i915_gem_object_flush_map(batch->obj);
    568 i915_gem_object_unpin_map(batch->obj);
    583 i915_vma_lock(batch);
    584 err = i915_request_await_object(rq, batch->obj, false);
    586 err = i915_vma_move_to_active(batch, rq, 0)
    819 struct i915_vma *batch;  local in function:scrub_whitelisted_registers
    [all...]
intel_engine.h |
    246 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
    248 memset(batch, 0, 6 * sizeof(u32));
    250 batch[0] = GFX_OP_PIPE_CONTROL(6);
    251 batch[1] = flags;
    252 batch[2] = offset;
    254 return batch + 6;
    265 * following the batch.
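
gen8_emit_pipe_control() above is shown essentially in full: it zeroes a six-dword slot, fills in the PIPE_CONTROL header, flags and post-sync offset, and returns the advanced pointer so emitters can be chained. Illustrative use (the flag chosen here is just an example, not taken from a particular call site):

        u32 *cs = batch;

        /* stall the command streamer until earlier work has flushed */
        cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
        /* ...further emit helpers keep writing at 'cs'... */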
|
intel_renderstate.c |
    60 * Macro to add commands to auxiliary batch.
    62 * this is sufficient as the null state generator makes the final batch
    68 #define OUT_BATCH(batch, i, val) \
    72 (batch)[(i)++] = (val); \
    91 u32 s = rodata->batch[i];
    98 rodata->batch[i + 1] != 0)
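
Only two lines of the OUT_BATCH() macro body are visible above; the lines in between (69-71) are elided by the index. A generic bounds-checked variant, written here purely to illustrate the pattern rather than to reconstruct the hidden lines:

        /* Illustration only: append one dword to an auxiliary batch, refusing
         * to write past 'max' entries.  The real macro's overflow handling is
         * not shown in the hits. */
        #define OUT_BATCH_CHECKED(batch, i, max, val) do {      \
                if ((i) >= (max))                               \
                        return -ENOSPC;                         \
                (batch)[(i)++] = (val);                         \
        } while (0)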
|
/src/usr.bin/mail/ |
mime_detach.c |
    62  int batch;  member in struct:__anond51030100108
    71  detach_ctl.batch = value(ENAME_MIME_DETACH_BATCH) != NULL;
    72  detach_ctl.ask = detach_ctl.batch ? 0 : 1;
    97  if (!detach_ctl.batch) {
    146 detach_ctl.batch = 1;
    155 detach_ctl.batch = 1;
    237 detach_ctl.batch = 0;
    242 } while (!detach_ctl.batch);
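
The mime_detach.c hits outline the control flow: if the mime-detach-batch variable is set, prompting is suppressed and attachments are handled with the defaults; otherwise each attachment is asked about until the user picks a batch answer. A condensed sketch of the setup step (the function name is invented):

        /* Sketch: decide once, up front, whether detaching runs unattended. */
        static void detach_init_batch_mode(void)
        {
                detach_ctl.batch = value(ENAME_MIME_DETACH_BATCH) != NULL;
                detach_ctl.ask   = detach_ctl.batch ? 0 : 1;  /* only prompt when not batching */
        }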
|
/src/sys/external/bsd/drm2/include/linux/ |
shrinker.h | 49 size_t batch; member in struct:shrinker
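
In the Linux shrinker interface this compat header mirrors, batch caps how many objects scan_objects() is asked to reclaim per call (0 falls back to the default SHRINK_BATCH). A hedged registration sketch, assuming the usual Linux fields and register_shrinker() are available in the shim; my_count()/my_scan() are hypothetical callbacks:

        /* Sketch of a cache registering itself with a per-call scan batch.
         * my_count() returns the number of reclaimable objects, my_scan()
         * frees up to sc->nr_to_scan of them and returns how many it freed. */
        static struct shrinker my_shrinker = {
                .count_objects = my_count,
                .scan_objects  = my_scan,
                .seeks         = DEFAULT_SEEKS,
                .batch         = 64,    /* ask for at most 64 objects per scan */
        };

        /* ... register_shrinker(&my_shrinker) during init ... */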
|
/src/tests/usr.bin/gdb/ |
t_regress.sh |
    48 gdb --batch -x test.gdb dig >gdb.out
    68 gdb --batch -x test.gdb ./test >gdb.out 2>&1
    87 gdb --batch -x test.gdb >gdb.out 2>&1
|
/src/sys/external/bsd/drm2/dist/drm/vmwgfx/ |
vmwgfx_mob.c |
    241 struct vmw_otable_batch *batch)
    245 struct vmw_otable *otables = batch->otables;
    254 for (i = 0; i < batch->num_otables; ++i) {
    266 0, false, &batch->otable_bo);
    271 ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
    273 ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
    276 ret = vmw_bo_map_dma(batch->otable_bo);
    280 ttm_bo_unreserve(batch->otable_bo);
    283 for (i = 0; i < batch->num_otables; ++i) {
    284 if (!batch->otables[i].enabled
    [all...]
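
The vmwgfx_mob.c hits come from setting up an object-table batch: one buffer object backs all otables in the batch, and it is reserved, populated and DMA-mapped before the per-table setup loop runs. A compressed sketch using only the calls visible above (error unwinding omitted):

        /* Sketch: prepare the single BO that backs every otable in the batch. */
        ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
        if (ret == 0)
                ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
        if (ret == 0)
                ret = vmw_bo_map_dma(batch->otable_bo);
        ttm_bo_unreserve(batch->otable_bo);

        /* each table in the batch is then initialised in turn */
        for (i = 0; i < batch->num_otables; ++i) {
                /* ... per-otable setup ... */
        }
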
/src/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/ |
tsan_dense_alloc.h |
    112 T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);  local in function:__tsan::DenseSlabAlloc::Refill
    116 new(batch + i) T;
    117 *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
    119 *(IndexT*)(batch + kL2Size - 1) = 0;
    121 map_[fillpos_++] = batch;
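
The Refill hits show the dense allocator's trick: a freshly mmapped slab of kL2Size objects is turned into a free list by storing, inside each slot, the index of the next slot, with 0 terminating the chain. A generic C sketch of the same indexing scheme (the names here are illustrative, not the tsan class members):

        /* Sketch: thread a free list through a newly mapped slab.  Global
         * indices are 1-based so that 0 can serve as the end-of-list marker,
         * the same convention the tsan allocator uses. */
        static void link_free_list(unsigned int *slab, unsigned int slab_size,
                                   unsigned int slab_idx)
        {
                unsigned int j;

                /* slot j points at the 1-based global index of slot j+1 ... */
                for (j = 0; j + 1 < slab_size; j++)
                        slab[j] = slab_idx * slab_size + (j + 1) + 1;
                /* ... and the last slot terminates the list */
                slab[slab_size - 1] = 0;
        }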
|
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/ |
sanitizer_quarantine.h |
    33  void *batch[kSize];  member in struct:__sanitizer::QuarantineBatch
    37  batch[0] = ptr;
    38  this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
    41  // The total size of quarantined nodes recorded in this batch.
    48  batch[count++] = ptr;
    61  batch[count + i] = from->batch[i];
    166 // require some tuning). It saves us merge attempt when the batch list
    187 CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
    189 PREFETCH(b->batch[i])
    [all...]
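
Two operations dominate the QuarantineBatch hits: pushing a pointer into the current batch and merging a neighbouring batch into it so that underfull batches do not pile up. A minimal C rendering of the merge step (field names mirror the hits; the struct and its capacity are redeclared here only for the sketch):

        /* Sketch of batch merging: append all of 'from's pointers to 'to' and
         * move the accounted byte size across.  The caller must ensure the
         * combined count fits, as the real code checks before merging. */
        struct quarantine_batch {
                unsigned int count;     /* pointers currently stored */
                unsigned long size;     /* bytes accounted to this batch */
                void *batch[1021];      /* illustrative capacity */
        };

        static void merge(struct quarantine_batch *to, struct quarantine_batch *from)
        {
                unsigned int i;

                for (i = 0; i < from->count; i++)
                        to->batch[to->count + i] = from->batch[i];
                to->count += from->count;
                to->size  += from->size;

                from->count = 0;
                from->size  = 0;
        }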
sanitizer_allocator_local_cache.h |
    169 void *res = c->batch[--c->count];
    170 PREFETCH(c->batch[c->count - 1]);
    184 c->batch[c->count++] = p;
    212 void *batch[2 * TransferBatch::kMaxNumCached];  member in struct:SizeClassAllocator32LocalCache::PerClass
    228 // id. 0 means the class size is large enough to store a batch within one
    249 b->CopyToArray(c->batch);
    260 class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    261 // Failure to allocate a batch while releasing memory is non recoverable.
    262 // TODO(alekseys): Figure out how to do it without allocating a new batch.
    265 "transfer batch.\n", SanitizerToolName)
    [all...]
/src/tests/net/npf/ |
t_npf.sh | 45 gdb -batch -ex bt npftest npftest.core
|
/src/sys/external/bsd/drm/dist/shared-core/ |
i915_dma.c |
    486 drm_i915_batchbuffer_t * batch)
    489 struct drm_clip_rect __user *boxes = batch->cliprects;
    490 int nbox = batch->num_cliprects;
    494 if ((batch->start | batch->used) & 0x7) {
    506 batch->DR1, batch->DR4);
    515 OUT_RING(batch->start);
    518 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
    524 OUT_RING(batch->start | MI_BATCH_NON_SECURE)
    613 drm_i915_batchbuffer_t *batch = data;  local in function:i915_batchbuffer
    [all...]
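
These i915_dma.c hits are from the legacy batchbuffer dispatch path: the ioctl argument is validated for 8-byte alignment, then a batch-buffer-start is emitted to the ring with the batch's start address, OR-ing in MI_BATCH_NON_SECURE when the batch must not run privileged. A heavily simplified sketch of that emission (ring begin/advance bookkeeping and per-generation differences are glossed over; 'secure_batch' is an illustrative flag):

        /* Sketch only: reject unaligned batches, then point the ring at them. */
        if ((batch->start | batch->used) & 0x7)
                return -EINVAL;                 /* hardware wants 8-byte alignment */

        OUT_RING(MI_BATCH_BUFFER_START);
        if (secure_batch)
                OUT_RING(batch->start);
        else
                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
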
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/tests/ |
sanitizer_quarantine_test.cc |
    39  while (QuarantineBatch *batch = cache->DequeueBatch())  local in function:__sanitizer::DeallocateCache
    40  cb.Deallocate(batch);
    53  ASSERT_EQ(into.batch[0], kFakePtr);
    54  ASSERT_EQ(into.batch[1], kFakePtr);
    62  // Merge the batch to the limit.
    116 // Batches merged, one batch to deallocate.
|
/src/usr.bin/patch/ |
common.h | 99 extern bool batch;
|
/src/tests/lib/csu/ |
t_hello.sh | 52 gdb -batch -ex bt -ex 'info registers' -ex disas \
|