| /src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
| igt_spinner.h | 25 u32 *batch; member in struct:igt_spinner
|
igt_spinner.c |  52 spin->batch = vaddr;
              | 102 u32 *batch; local in function:igt_spinner_create_request
              | 140 batch = spin->batch;
              | 143 *batch++ = MI_STORE_DWORD_IMM_GEN4;
              | 144 *batch++ = lower_32_bits(hws_address(hws, rq));
              | 145 *batch++ = upper_32_bits(hws_address(hws, rq));
              | 147 *batch++ = MI_STORE_DWORD_IMM_GEN4;
              | 148 *batch++ = 0;
              | 149 *batch++ = hws_address(hws, rq) [all...]
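
igt_spinner maps a page of the spinner object as spin->batch and then fills it by bumping a u32 pointer one command dword at a time, as the matches above show. A self-contained sketch of that emission style follows; the opcode value, the typedefs and the target address are placeholders, not the driver's real definitions.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Stand-ins for the driver's definitions; the opcode value is a placeholder. */
    #define MI_STORE_DWORD_IMM_GEN4 0xdeadbeefu
    #define lower_32_bits(x) ((u32)(x))
    #define upper_32_bits(x) ((u32)((u64)(x) >> 32))

    /*
     * Emit one "store immediate dword" command into a mapped batch page,
     * following the pointer-bump pattern of igt_spinner_create_request():
     * command header, 64-bit destination, then the payload.
     */
    static u32 *emit_store_dw(u32 *batch, u64 gpu_addr, u32 value)
    {
        *batch++ = MI_STORE_DWORD_IMM_GEN4;  /* command header */
        *batch++ = lower_32_bits(gpu_addr);  /* destination, low 32 bits */
        *batch++ = upper_32_bits(gpu_addr);  /* destination, high 32 bits */
        *batch++ = value;                    /* payload the GPU will write */
        return batch;                        /* advanced write pointer */
    }

    int main(void)
    {
        u32 page[16] = { 0 };
        u32 *cs = emit_store_dw(page, 0x100000000ULL, 42);

        printf("emitted %zu dwords\n", (size_t)(cs - page));
        return 0;
    }
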
| /src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/ |
igt_gem_utils.c | 117 struct i915_vma *batch; local in function:igt_gpu_fill_dw
                | 124 batch = igt_emit_store_dw(vma, offset, count, val);
                | 125 if (IS_ERR(batch))
                | 126 return PTR_ERR(batch);
                | 139 batch->node.start, batch->node.size,
                | 144 i915_vma_lock(batch);
                | 145 err = i915_request_await_object(rq, batch->obj, false);
                | 147 err = i915_vma_move_to_active(batch, rq, 0);
                | 148 i915_vma_unlock(batch); [all...]
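
igt_emit_store_dw() hands back either a valid batch VMA or an error encoded in the pointer value itself, which is why the caller tests IS_ERR(batch) and unpacks the errno with PTR_ERR(batch). Below is a small stand-alone C model of that error-pointer idiom; the constants follow the usual Linux convention, but the surrounding code is illustrative only.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Error-pointer helpers: a small negative errno hidden in a pointer value. */
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    /* Returns a real object, or an errno smuggled through the pointer. */
    static void *make_batch(int fail)
    {
        static int object;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&object;
    }

    int main(void)
    {
        void *batch = make_batch(1);

        if (IS_ERR(batch)) {             /* same shape as the call site above */
            printf("error %ld\n", PTR_ERR(batch));
            return 1;
        }
        return 0;
    }
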
| /src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
intel_renderstate.h | 37 const u32 *batch; member in struct:intel_renderstate_rodata
                    | 44 .batch = gen ## _g ## _null_state_batch, \
|
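The second match is part of a token-pasting macro: one macro builds the rodata descriptor for each GPU generation by splicing the generation number into the symbol name of that generation's null-state batch. A stand-alone sketch of the same technique; the macro name, the batch_items field and the array contents are invented for the example.

    #include <stdio.h>

    typedef unsigned int u32;

    struct rodata {
        const u32 *batch;          /* pointer to the per-generation dwords */
        unsigned int batch_items;  /* number of dwords in that array */
    };

    /* Illustrative per-generation arrays with made-up contents. */
    static const u32 gen6_null_state_batch[] = { 0x1, 0x2, 0x3 };
    static const u32 gen7_null_state_batch[] = { 0x4, 0x5 };

    /* RO_DATA(6) expands to { .batch = gen6_null_state_batch, ... } */
    #define RO_DATA(_g) {                                                    \
        .batch = gen ## _g ## _null_state_batch,                             \
        .batch_items = sizeof(gen ## _g ## _null_state_batch) / sizeof(u32), \
    }

    static const struct rodata null_state[] = {
        RO_DATA(6),
        RO_DATA(7),
    };

    int main(void)
    {
        printf("gen6 null state: %u dwords\n", null_state[0].batch_items);
        return 0;
    }
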
| /src/sys/external/bsd/drm2/include/linux/ |
| shrinker.h | 49 size_t batch; member in struct:shrinker
|
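In the Linux shrinker interface this compat header mirrors, batch caps how many objects a single scan callback is asked to free, with 0 meaning "use the default". The toy below shows how such a field turns one large reclaim request into bounded chunks of work; the struct layout, callback and default value are hypothetical, not the NetBSD shim's definitions.

    #include <stdio.h>

    /* Hypothetical shrinker: only the pieces needed to show batching. */
    struct shrinker {
        size_t batch;                                   /* max objects per scan call, 0 = default */
        size_t (*scan)(struct shrinker *sh, size_t nr); /* returns how many were freed */
    };

    #define DEFAULT_BATCH 128

    /* Feed the backlog to the scan callback in chunks of at most "batch". */
    static size_t shrink(struct shrinker *sh, size_t backlog)
    {
        size_t batch = sh->batch ? sh->batch : DEFAULT_BATCH;
        size_t freed = 0;

        while (backlog > 0) {
            size_t nr = backlog < batch ? backlog : batch;

            freed += sh->scan(sh, nr);  /* one bounded piece of work */
            backlog -= nr;
        }
        return freed;
    }

    static size_t count_scan(struct shrinker *sh, size_t nr)
    {
        (void)sh;
        return nr;                      /* pretend everything was freed */
    }

    int main(void)
    {
        struct shrinker sh = { .batch = 32, .scan = count_scan };

        printf("freed %zu objects\n", shrink(&sh, 100));
        return 0;
    }
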
| /src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/tests/ |
sanitizer_quarantine_test.cc |  39 while (QuarantineBatch *batch = cache->DequeueBatch()) local in function:__sanitizer::DeallocateCache
                             |  40 cb.Deallocate(batch);
                             |  53 ASSERT_EQ(into.batch[0], kFakePtr);
                             |  54 ASSERT_EQ(into.batch[1], kFakePtr);
                             |  62 // Merge the batch to the limit.
                             | 116 // Batches merged, one batch to deallocate.
|
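DeallocateCache() in the test drains a quarantine cache batch by batch: dequeue until the cache is empty and hand every batch to the deallocation callback. A self-contained C model of that drain loop, with a plain singly linked list standing in for QuarantineCache and QuarantineBatch:

    #include <stdio.h>
    #include <stdlib.h>

    struct batch {
        struct batch *next;
        size_t count;               /* pointers quarantined in this batch */
    };

    struct cache {
        struct batch *head;
    };

    /* Pop one batch off the cache, or NULL once it is drained. */
    static struct batch *dequeue_batch(struct cache *c)
    {
        struct batch *b = c->head;

        if (b != NULL)
            c->head = b->next;
        return b;
    }

    /* ~ DeallocateCache(): keep dequeueing and deallocate each batch. */
    static void deallocate_cache(struct cache *c)
    {
        struct batch *b;

        while ((b = dequeue_batch(c)) != NULL)
            free(b);                /* ~ cb.Deallocate(batch) */
    }

    int main(void)
    {
        struct cache c = { NULL };

        for (int i = 0; i < 3; i++) {   /* enqueue a few empty batches */
            struct batch *b = calloc(1, sizeof(*b));

            b->next = c.head;
            c.head = b;
        }
        deallocate_cache(&c);
        printf("cache drained: %s\n", c.head == NULL ? "yes" : "no");
        return 0;
    }
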
| /src/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/ |
tsan_dense_alloc.h | 112 T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_); local in function:__tsan::DenseSlabAlloc::Refill
                   | 116 new(batch + i) T;
                   | 117 *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
                   | 119 *(IndexT*)(batch + kL2Size - 1) = 0;
                   | 121 map_[fillpos_++] = batch;
|
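Refill() maps one more slab of kL2Size objects, threads every slot into a free list by storing the global index of the next free slot inside the slot itself (0 terminates the list, so index 0 is never handed out), and records the slab in the L1 map_ so a global index can later be turned back into a pointer. A simplified C model of that pattern; calloc stands in for MmapOrDie() plus the placement-new loop, and the sizes are made up.

    #include <stdio.h>
    #include <stdlib.h>

    enum { kL2Size = 4, kL1Size = 8, kObjSize = 16 };

    typedef unsigned int IndexT;

    static char  *map_[kL1Size];    /* L1 table: one pointer per mapped slab */
    static size_t fillpos_;         /* number of slabs mapped so far */

    /* Turn a global slot index back into the slot's address. */
    static char *slot(IndexT idx)
    {
        return map_[idx / kL2Size] + (size_t)(idx % kL2Size) * kObjSize;
    }

    /* ~ DenseSlabAlloc::Refill(): map a slab, link its slots into a free list. */
    static void refill(void)
    {
        char *batch = calloc(kL2Size, kObjSize);        /* ~ MmapOrDie() */

        for (size_t i = 0; i < kL2Size; i++)            /* slot i links to global index i+1 */
            *(IndexT *)(batch + i * kObjSize) =
                (IndexT)(i + 1 + fillpos_ * kL2Size);
        *(IndexT *)(batch + (kL2Size - 1) * kObjSize) = 0;  /* last slot ends the list */

        map_[fillpos_++] = batch;
    }

    int main(void)
    {
        refill();
        /* Walk the fresh free list, starting past the reserved index 0. */
        for (IndexT idx = 1; idx != 0; idx = *(IndexT *)slot(idx))
            printf("free slot %u\n", idx);
        return 0;
    }
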
| /src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_object_blt.c |  27 struct i915_vma *batch; local in function:intel_emit_vma_fill_blt
                      |  90 batch = i915_vma_instance(pool->obj, ce->vm, NULL);
                      |  91 if (IS_ERR(batch)) {
                      |  92 err = PTR_ERR(batch);
                      |  96 err = i915_vma_pin(batch, 0, 0, PIN_USER);
                      | 100 batch->private = pool;
                      | 101 return batch;
                      | 137 struct i915_vma *batch; local in function:i915_gem_object_fill_blt
                      | 155 batch = intel_emit_vma_fill_blt(ce, vma, value);
                      | 156 if (IS_ERR(batch)) {
                      | 211 struct i915_vma *batch; local in function:intel_emit_vma_copy_blt
                      | 325 struct i915_vma *vma[2], *batch; local in function:i915_gem_object_copy_blt [all...]
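
intel_emit_vma_fill_blt() follows a borrow-pin-tag shape: take a buffer from the pool, turn it into a pinned batch VMA, stash the owning pool in batch->private so teardown can return the pages, and hand the batch back to the caller (the real helper returns ERR_PTR-encoded errors rather than NULL). The sketch below only models that shape; every type and helper in it is a simplified stand-in, not the driver's API.

    #include <stdio.h>
    #include <stdlib.h>

    struct pool { const char *name; };

    struct vma {
        struct pool *private;   /* who the backing pages belong to */
        int pinned;
    };

    /* ~ intel_emit_vma_fill_blt(): build, pin and tag a batch from a pool. */
    static struct vma *emit_fill_batch(struct pool *pool)
    {
        struct vma *batch = calloc(1, sizeof(*batch)); /* ~ i915_vma_instance() */

        if (batch == NULL)
            return NULL;        /* the driver would return an ERR_PTR here */

        batch->pinned = 1;      /* ~ i915_vma_pin(batch, 0, 0, PIN_USER) */
        batch->private = pool;  /* remembered so teardown can release it */
        return batch;
    }

    int main(void)
    {
        struct pool p = { "blt pool" };
        struct vma *batch = emit_fill_batch(&p);

        if (batch == NULL)
            return 1;
        printf("batch pinned=%d from %s\n", batch->pinned, batch->private->name);
        free(batch);
        return 0;
    }
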
| /src/usr.bin/mail/ |
mime_detach.c |  62 int batch; member in struct:__anond51030100108
              |  71 detach_ctl.batch = value(ENAME_MIME_DETACH_BATCH) != NULL;
              |  72 detach_ctl.ask = detach_ctl.batch ? 0 : 1;
              |  97 if (!detach_ctl.batch) {
              | 146 detach_ctl.batch = 1;
              | 155 detach_ctl.batch = 1;
              | 237 detach_ctl.batch = 0;
              | 242 } while (!detach_ctl.batch);
|
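Here batch is a mode flag for attachment detaching: it starts out set when the mime-detach-batch variable exists, it suppresses per-attachment prompting (ask), and an interactive answer meaning "apply to everything" can switch it on mid-run. A stand-alone sketch of that control flow; getenv() and the prompt below stand in for mail(1)'s value()/ENAME_* machinery and the real answer parsing.

    #include <stdio.h>
    #include <stdlib.h>

    /* ~ the anonymous detach_ctl struct: prompting state for detaching. */
    static struct {
        int ask;    /* prompt for each attachment? */
        int batch;  /* run without any further questions */
    } detach_ctl;

    int main(void)
    {
        const char *attachments[] = { "a.pdf", "b.png", "c.txt" };
        char answer[16];

        /* ~ detach_ctl.batch = value(ENAME_MIME_DETACH_BATCH) != NULL; */
        detach_ctl.batch = getenv("MIME_DETACH_BATCH") != NULL;
        detach_ctl.ask = detach_ctl.batch ? 0 : 1;

        for (int i = 0; i < 3; i++) {
            if (detach_ctl.ask && !detach_ctl.batch) {
                printf("detach %s? (y = this one, a = all) ", attachments[i]);
                if (fgets(answer, sizeof(answer), stdin) == NULL)
                    break;
                if (answer[0] == 'a')
                    detach_ctl.batch = 1;   /* stop asking from now on */
                else if (answer[0] != 'y')
                    continue;               /* skip this attachment */
            }
            printf("detaching %s\n", attachments[i]);
        }
        return 0;
    }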