/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
igt_spinner.h |
    23  struct drm_i915_gem_object *hws;        member in struct:igt_spinner

igt_spinner.c |
    26  spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
    27  if (IS_ERR(spin->hws)) {
    28  err = PTR_ERR(spin->hws);
    38  i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
    39  vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
    57  i915_gem_object_unpin_map(spin->hws);
    61  i915_gem_object_put(spin->hws);
    71  static u64 hws_address(const struct i915_vma *hws,
    74  return hws->node.start + seqno_offset(rq->fence.context);
   100  struct i915_vma *hws, *vma              local in function:igt_spinner_create_request
    [all...]

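The pattern above is worth spelling out: the spinner keeps one 32-bit seqno slot per fence context inside its single-page HWS object (line 26), and hws_address() (lines 71/74) turns a request's fence context into the GPU address of that slot. A minimal userspace sketch of the arithmetic follows; PAGE_SIZE, the offset_in_page() wrap (borrowed from the selftest_hangcheck.c variant further down), and the made-up VMA offset are assumptions of the sketch, not the kernel code.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Wrap an arbitrary byte offset into the single HWS page. */
static uint32_t offset_in_page(uint64_t off)
{
	return (uint32_t)(off & (PAGE_SIZE - 1));
}

/* One 32-bit seqno slot per fence context, wrapped into the page. */
static uint32_t seqno_offset(uint64_t fence_context)
{
	return offset_in_page(sizeof(uint32_t) * fence_context);
}

/* GPU address of a request's slot: bound VMA start plus slot offset. */
static uint64_t hws_address(uint64_t vma_node_start, uint64_t fence_context)
{
	return vma_node_start + seqno_offset(fence_context);
}

int main(void)
{
	const uint64_t vma_start = 0x100000;	/* pretend GTT offset of the HWS page */

	for (uint64_t ctx = 1; ctx <= 4; ctx++)
		printf("ctx %" PRIu64 " -> 0x%" PRIx64 "\n",
		    ctx, hws_address(vma_start, ctx));
	return 0;
}
```

The spinner batch then spins while polling that address, which is why the object is mapped write-back and marked LLC-coherent (lines 38/39): the CPU side of the test reads the seqno the GPU writes there.
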
/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dcn21/ |
amdgpu_dcn21_hwseq.c |
    45  hws->ctx
    47  hws->regs->reg
    51  hws->shifts->field_name, hws->masks->field_name
    55  struct dce_hwseq *hws)
    69  int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
    83  mmhub_update_page_table_config(&config, hws);
    92  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn21_s0i3_golden_init_wa

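Lines 45/47/51 are the bodies of DC's CTX/REG/FN-style helper macros: they route the generic register-update code through per-ASIC shift and mask tables hanging off the dce_hwseq. A hedged model of that indirection follows; set_reg_field(), the struct layout, and the bit positions are invented for the sketch.

```c
#include <stdint.h>
#include <stdio.h>

struct hwseq_shift { uint8_t FIELD_NAME; };
struct hwseq_mask  { uint32_t FIELD_NAME; };

/* Toy stand-in for struct dce_hwseq: only the table pointers we need. */
struct dce_hwseq_model {
	const struct hwseq_shift *shifts;
	const struct hwseq_mask *masks;
};

/* Read-modify-write one register field, as REG_UPDATE-style macros do. */
static uint32_t set_reg_field(uint32_t reg, uint32_t mask, uint8_t shift,
    uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* Pretend this ASIC puts FIELD_NAME at bits 4..7. */
	static const struct hwseq_shift shifts = { .FIELD_NAME = 4 };
	static const struct hwseq_mask  masks  = { .FIELD_NAME = 0xf0 };
	struct dce_hwseq_model hws = { &shifts, &masks };

	uint32_t reg = 0xdeadbe0f;
	reg = set_reg_field(reg, hws.masks->FIELD_NAME,
	    hws.shifts->FIELD_NAME, 0x5);
	printf("reg = 0x%08x\n", reg);	/* prints 0xdeadbe5f */
	return 0;
}
```

The payoff is that dcn21 code can share register-programming logic with other DCN generations and differ only in the tables the macros dereference.
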
/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dce/ |
amdgpu_dce_hwseq.c |
    37  hws->ctx
    39  hws->regs->reg
    43  hws->shifts->field_name, hws->masks->field_name
    45  void dce_enable_fe_clock(struct dce_hwseq *hws,
    58  struct dce_hwseq *hws = dc->hwseq;      local in function:dce_pipe_control_lock
    80  if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
    85  if (hws->wa.blnd_crtc_trigger) {
    93  void dce_set_blender_mode(struct dce_hwseq *hws,
   125  if (hws->masks->BLND_ALPHA_MODE != 0)
    [all...]

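Lines 80 and 125 show a second use of the mask tables: a zero mask marks a field that simply does not exist on the current ASIC, so the sequencer tests the mask before touching the register. A toy model of that guard; the helper, struct, and output are invented for illustration, only the field names come from the excerpt.

```c
#include <stdint.h>
#include <stdio.h>

struct blnd_masks {
	uint32_t BLND_BLND_V_UPDATE_LOCK;
	uint32_t BLND_ALPHA_MODE;
};

/* Program the blender alpha mode only where the field exists. */
static void set_blender_mode(const struct blnd_masks *masks)
{
	if (masks->BLND_ALPHA_MODE != 0)
		printf("programming BLND_ALPHA_MODE\n");
	else
		printf("no alpha-mode field on this ASIC, skipped\n");
}

int main(void)
{
	const struct blnd_masks newer = {
		.BLND_BLND_V_UPDATE_LOCK = 0x1,
		.BLND_ALPHA_MODE = 0x6,
	};
	const struct blnd_masks older = { 0 };	/* field absent: zero mask */

	set_blender_mode(&newer);
	set_blender_mode(&older);
	return 0;
}
```
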
/src/sys/external/bsd/drm/dist/shared-core/ |
i915_dma.c |
   155  /* Clear the HWS virtual address at teardown */
   801  drm_i915_hws_addr_t *hws = data;        local in function:i915_set_status_page
   811  DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
   813  dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
   815  dev_priv->hws_map.offset = dev->agp->base + hws->addr;
   833  DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
   835  DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
   876  /* Init HWS */

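In the legacy ioctl at line 813 the driver keeps only bits 12..28 of the user-supplied HWS address: the low 12 bits (the page offset) are dropped, and what remains is a page-aligned offset within a 512 MiB range, which line 815 then maps at dev->agp->base + hws->addr. A small demonstration of the masking arithmetic with a made-up address:

```c
#include <stdint.h>
#include <stdio.h>

#define HWS_ADDR_MASK	(0x1ffffu << 12)	/* bits 12..28, as at line 813 */

int main(void)
{
	uint32_t addr = 0x12345678;		/* made-up user-supplied address */
	uint32_t gfx  = addr & HWS_ADDR_MASK;

	/* low 12 bits (page offset) and bits above 28 are discarded */
	printf("requested 0x%08x -> status_gfx_addr 0x%08x\n", addr, gfx);
	return 0;
}
```
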
/src/sys/external/bsd/drm2/dist/drm/i915/gt/ |
selftest_hangcheck.c |
    53  struct drm_i915_gem_object *hws;        member in struct:hang
    74  h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
    75  if (IS_ERR(h->hws)) {
    76  err = PTR_ERR(h->hws);
    86  i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
    87  vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
   105  i915_gem_object_unpin_map(h->hws);
   109  i915_gem_object_put(h->hws);
   115  static u64 hws_address(const struct i915_vma *hws,
   118  return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context)
   144  struct i915_vma *hws, *vma;             local in function:hang_create_request
    [all...]

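This is the same per-context slot scheme as in igt_spinner.c above, with the wrap written out explicitly at line 118. Writing it out exposes the aliasing it implies: a 4 KiB page holds only 1024 u32 slots, so fence contexts 1024 apart land on the same HWS dword, which is harmless at selftest scale. A standalone model of just the offset arithmetic; the constants mirror line 74's one-page object, the rest is illustrative.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define SLOTS (PAGE_SIZE / sizeof(uint32_t))	/* 1024 per-context slots */

/* Byte offset of a context's seqno slot within the HWS page (line 118). */
static uint32_t slot_offset(uint64_t fence_context)
{
	return (uint32_t)((sizeof(uint32_t) * fence_context) & (PAGE_SIZE - 1));
}

int main(void)
{
	printf("ctx 7    -> offset %u\n", slot_offset(7));
	printf("ctx 1031 -> offset %u (aliases ctx 7: 1031 = 7 + %zu)\n",
	    slot_offset(1031), SLOTS);
	return 0;
}
```
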
intel_engine_cs.c |
   522  * On g33, we cannot place HWS above 256MiB, so
   526  * and hang if the HWS is placed at the top of the
   528  * platforms have issues with us placing the HWS
   547  * Though the HWS register does support 36bit addresses, historically
   549  * the HWS is placed above 4G. We only allow objects to be allocated
  1340  const u32 *hws =                        local in function:intel_engine_print_registers
  1375  idx, hws[idx * 2], hws[idx * 2 + 1]);

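The comment block at lines 522-549 records the placement constraints (below 256 MiB on g33, below 4 GiB elsewhere despite the register's 36-bit support), while the debug dump at lines 1340/1375 walks the status page two 32-bit words per output row. A self-contained model of that dump loop over a fake HWS page; the exact kernel output format is not reproduced here.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hws[8];

	for (unsigned i = 0; i < 8; i++)
		hws[i] = 0x11111111u * i;	/* fake status-page contents */

	/* Two dwords per row, indexed as at line 1375. */
	for (unsigned idx = 0; idx < 8 / 2; idx++)
		printf("[%04x] %08x %08x\n",
		    idx * 8, hws[idx * 2], hws[idx * 2 + 1]);
	return 0;
}
```
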
/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dce100/ |
amdgpu_dce100_resource.c |
   504  struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);         local in function:dce100_hwseq_create
   506  if (hws) {
   507  hws->ctx = ctx;
   508  hws->regs = &hwseq_reg;
   509  hws->shifts = &hwseq_shift;
   510  hws->masks = &hwseq_mask;
   512  return hws;

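This constructor, and the identical ones in the dce112, dce120/dce121, and dce80 entries below, is the whole per-ASIC wiring: allocate a zeroed dce_hwseq, point it at that ASIC's static register, shift, and mask tables, and return NULL on allocation failure for the caller to handle. A userspace rendition with calloc() standing in for kzalloc() and stub types; apart from the field and table names, everything here is an assumption of the sketch.

```c
#include <stdlib.h>

/* Stub types; the real ones carry register offsets and bitfield tables. */
struct hwseq_registers { int dummy; };
struct hwseq_shift     { int dummy; };
struct hwseq_mask      { int dummy; };
struct dc_context      { int dummy; };

/* Static per-ASIC tables, as in the file-scope hwseq_reg/shift/mask. */
static const struct hwseq_registers hwseq_reg;
static const struct hwseq_shift     hwseq_shift;
static const struct hwseq_mask      hwseq_mask;

struct dce_hwseq {
	struct dc_context *ctx;
	const struct hwseq_registers *regs;
	const struct hwseq_shift *shifts;
	const struct hwseq_mask *masks;
};

static struct dce_hwseq *hwseq_create(struct dc_context *ctx)
{
	struct dce_hwseq *hws = calloc(1, sizeof(*hws));	/* zeroed, like kzalloc */

	if (hws) {	/* on failure the caller just sees NULL */
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
	}
	return hws;
}

int main(void)
{
	struct dc_context ctx = { 0 };
	struct dce_hwseq *hws = hwseq_create(&ctx);

	free(hws);
	return 0;
}
```

Judging by the four identical excerpts, the dceXX variants differ only in which static hwseq_reg/hwseq_shift/hwseq_mask tables the pointers reference.
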
/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dce112/ |
amdgpu_dce112_resource.c |
   531  struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);         local in function:dce112_hwseq_create
   533  if (hws) {
   534  hws->ctx = ctx;
   535  hws->regs = &hwseq_reg;
   536  hws->shifts = &hwseq_shift;
   537  hws->masks = &hwseq_mask;
   539  return hws;

/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dce120/ |
amdgpu_dce120_resource.c |
   786  struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);         local in function:dce120_hwseq_create
   788  if (hws) {
   789  hws->ctx = ctx;
   790  hws->regs = &hwseq_reg;
   791  hws->shifts = &hwseq_shift;
   792  hws->masks = &hwseq_mask;
   794  return hws;
   800  struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);         local in function:dce121_hwseq_create
   802  if (hws) {
   803  hws->ctx = ctx
    [all...]

/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dce80/ |
amdgpu_dce80_resource.c |
   627  struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);         local in function:dce80_hwseq_create
   629  if (hws) {
   630  hws->ctx = ctx;
   631  hws->regs = &hwseq_reg;
   632  hws->shifts = &hwseq_shift;
   633  hws->masks = &hwseq_mask;
   635  return hws;

/src/sys/external/bsd/drm2/dist/drm/amd/display/dc/dcn20/ |
amdgpu_dcn20_hwseq.c |
    61  hws->ctx
    63  hws->regs->reg
    67  hws->shifts->field_name, hws->masks->field_name
   188  struct dce_hwseq *hws,
   228  void dcn20_dccg_init(struct dce_hwseq *hws)
   253  struct dce_hwseq *hws)
   280  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_init_blank
   328  hws->funcs.wait_for_blank_complete(opp);
   332  struct dce_hwseq *hws,
   561  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_plane_atomic_disable
   612  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_enable_stream_timing
   834  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_set_input_transfer_func
  1315  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_update_dchubp_dpp
  1477  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_program_pipe
  1561  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_program_front_end_for_ctx
  1705  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_update_bandwidth
  1825  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_disable_stream_gating
  1840  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_enable_stream_gating
  1973  struct dce_hwseq *hws = link->dc->hwseq;        local in function:dcn20_unblank_stream
  2089  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_reset_hw_ctx_wrap
  2140  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_update_mpcc
  2292  struct dce_hwseq *hws = dc->hwseq;      local in function:dcn20_fpga_init_hw
    [all...]

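Line 328's hws->funcs.wait_for_blank_complete(opp) shows the other half of the dce_hwseq design: besides the register tables, it carries a table of per-generation function pointers that generic sequencer code dispatches through. A toy model of that dispatch; the member name follows the excerpt, everything else (the struct shapes and the behaviour) is invented for the sketch.

```c
#include <stdio.h>

struct opp { int id; };	/* stand-in for DC's output pixel processor */

struct dce_hwseq_funcs {
	void (*wait_for_blank_complete)(struct opp *opp);
};

/* Toy stand-in for struct dce_hwseq: just the function table. */
struct dce_hwseq_model {
	struct dce_hwseq_funcs funcs;
};

static void dcn20_wait_for_blank_complete(struct opp *opp)
{
	printf("opp %d: polling until the blanking region is reached\n", opp->id);
}

int main(void)
{
	struct dce_hwseq_model hws = {
		.funcs = {
			.wait_for_blank_complete = dcn20_wait_for_blank_complete,
		},
	};
	struct opp opp = { .id = 0 };

	hws.funcs.wait_for_blank_complete(&opp);	/* dispatch as at line 328 */
	return 0;
}
```
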
/src/sys/external/bsd/drm2/dist/drm/i915/ |
i915_gpu_error.h |
    77  u32 hws;                                member in struct:intel_engine_coredump