Lines Matching refs:sna (cross-reference hits for the sna driver context throughout the gen9 SNA render backend)
32 #include "sna.h"
258 #define OUT_BATCH(v) batch_emit(sna, v)
259 #define OUT_BATCH64(v) batch_emit64(sna, v)
260 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
261 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
300 static bool is_skl(struct sna *sna)
302 return sna->kgem.gen == 0110;
305 static bool is_bxt(struct sna *sna)
307 return sna->kgem.gen == 0111;
310 static bool is_kbl(struct sna *sna)
312 return sna->kgem.gen == 0112;
315 static bool is_glk(struct sna *sna)
317 return sna->kgem.gen == 0113;
320 static bool is_cfl(struct sna *sna)
322 return sna->kgem.gen == 0114;
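The is_skl() .. is_cfl() hits at 300-322 compare kgem.gen against the octal constants 0110..0114: the leading digits encode the hardware generation (011 octal = 9) and the last digit selects the gen9 platform. A minimal standalone sketch of that encoding, assuming the value really is (generation << 3) | minor; decode_gen() is a hypothetical helper, not part of the driver:

#include <stdio.h>

/* Assumed encoding behind the is_*() predicates above: the constants are
 * octal, so 0110 is generation 9, minor 0. */
static void decode_gen(unsigned gen)
{
	printf("gen %o -> generation %u, minor %u\n", gen, gen >> 3, gen & 7);
}

int main(void)
{
	decode_gen(0110);	/* is_skl(): Skylake     */
	decode_gen(0111);	/* is_bxt(): Broxton     */
	decode_gen(0112);	/* is_kbl(): Kaby Lake   */
	decode_gen(0113);	/* is_glk(): Geminilake  */
	decode_gen(0114);	/* is_cfl(): Coffee Lake */
	return 0;
}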
557 gen9_emit_push_constants(struct sna *sna)
578 gen9_emit_urb(struct sna *sna)
582 OUT_BATCH(sna->render_state.gen9.info->urb.max_vs_entries << URB_ENTRY_NUMBER_SHIFT |
600 gen9_emit_state_base_address(struct sna *sna)
604 assert(sna->kgem.surface - sna->kgem.nbatch <= 16384);
610 OUT_BATCH64(kgem_add_reloc64(&sna->kgem, /* surface */
611 sna->kgem.nbatch,
615 OUT_BATCH64(kgem_add_reloc64(&sna->kgem, /* dynamic */
616 sna->kgem.nbatch,
617 sna->render_state.gen9.general_bo,
621 OUT_BATCH64(kgem_add_reloc64(&sna->kgem, /* instruction */
622 sna->kgem.nbatch,
623 sna->render_state.gen9.general_bo,
627 num_pages = sna->render_state.gen9.general_bo->size.pages.count;
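gen9_emit_state_base_address() (hits 600-627) writes 64-bit batch entries with OUT_BATCH64(kgem_add_reloc64(...)): the batch holds a presumed GPU address while a relocation entry records which bo the kernel should patch in at execbuffer time. A self-contained illustration of that pairing; mini_batch, add_reloc64() and out_batch64() are hypothetical stand-ins, not the kgem API:

#include <stdint.h>

/* Hypothetical miniature of a batch with a relocation list. */
struct reloc { unsigned offset; unsigned handle; uint64_t delta; };

struct mini_batch {
	uint32_t dw[1024];
	unsigned nbatch;		/* next free dword, grows upward */
	struct reloc relocs[64];
	unsigned nreloc;
};

/* Record that the 64-bit value emitted at 'nbatch' must be rewritten with
 * the GPU address of 'handle' (plus any flags carried in 'delta'). */
static uint64_t add_reloc64(struct mini_batch *b, unsigned handle, uint64_t delta)
{
	b->relocs[b->nreloc++] = (struct reloc){ b->nbatch, handle, delta };
	return delta;			/* placeholder; the kernel patches it later */
}

static void out_batch64(struct mini_batch *b, uint64_t v)
{
	b->dw[b->nbatch++] = (uint32_t)v;
	b->dw[b->nbatch++] = (uint32_t)(v >> 32);
}

/* usage, mirroring the shape of the STATE_BASE_ADDRESS hits above:
 *	out_batch64(b, add_reloc64(b, general_bo_handle, flags));
 */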
640 gen9_emit_vs_invariant(struct sna *sna)
668 gen9_emit_hs_invariant(struct sna *sna)
698 gen9_emit_te_invariant(struct sna *sna)
707 gen9_emit_ds_invariant(struct sna *sna)
739 gen9_emit_gs_invariant(struct sna *sna)
770 gen9_emit_sol_invariant(struct sna *sna)
780 gen9_emit_sf_invariant(struct sna *sna)
789 gen9_emit_clip_invariant(struct sna *sna)
804 gen9_emit_null_depth_buffer(struct sna *sna)
849 gen9_emit_wm_invariant(struct sna *sna)
851 gen9_emit_null_depth_buffer(sna);
910 gen9_emit_cc_invariant(struct sna *sna)
915 gen9_emit_vf_invariant(struct sna *sna)
940 gen9_emit_invariant(struct sna *sna)
967 gen9_emit_push_constants(sna);
968 gen9_emit_urb(sna);
970 gen9_emit_state_base_address(sna);
972 gen9_emit_vf_invariant(sna);
973 gen9_emit_vs_invariant(sna);
974 gen9_emit_hs_invariant(sna);
975 gen9_emit_te_invariant(sna);
976 gen9_emit_ds_invariant(sna);
977 gen9_emit_gs_invariant(sna);
978 gen9_emit_sol_invariant(sna);
979 gen9_emit_clip_invariant(sna);
980 gen9_emit_sf_invariant(sna);
981 gen9_emit_wm_invariant(sna);
982 gen9_emit_cc_invariant(sna);
984 sna->render_state.gen9.needs_invariant = false;
988 gen9_emit_cc(struct sna *sna, uint32_t blend)
990 struct gen9_render_state *render = &sna->render_state.gen9;
1033 gen9_emit_sampler(struct sna *sna, uint32_t state)
1035 if (sna->render_state.gen9.samplers == state)
1038 sna->render_state.gen9.samplers = state;
1044 OUT_BATCH(sna->render_state.gen9.wm_state + state * 2 * sizeof(struct gen9_sampler_state));
1048 gen9_emit_sf(struct sna *sna, bool has_mask)
1052 if (sna->render_state.gen9.num_sf_outputs == num_sf_outputs)
1057 sna->render_state.gen9.num_sf_outputs = num_sf_outputs;
1073 gen9_emit_wm(struct sna *sna, int kernel)
1078 if (sna->render_state.gen9.kernel == kernel)
1081 sna->render_state.gen9.kernel = kernel;
1082 kernels = sna->render_state.gen9.wm_kernel[kernel];
1111 gen9_emit_binding_table(struct sna *sna, uint16_t offset)
1113 if (sna->render_state.gen9.surface_table == offset)
1121 sna->render_state.gen9.surface_table = offset;
1126 gen9_emit_drawing_rectangle(struct sna *sna,
1135 if (sna->render_state.gen9.drawrect_limit == limit &&
1136 sna->render_state.gen9.drawrect_offset == offset)
1139 sna->render_state.gen9.drawrect_offset = offset;
1140 sna->render_state.gen9.drawrect_limit = limit;
1150 gen9_emit_vertex_elements(struct sna *sna,
1159 struct gen9_render_state *render = &sna->render_state.gen9;
1283 gen9_emit_pipe_invalidate(struct sna *sna)
1294 gen9_emit_pipe_flush(struct sna *sna, bool need_stall)
1310 gen9_emit_pipe_stall(struct sna *sna)
1321 gen9_emit_state(struct sna *sna,
1332 (sna->render_state.gen9.emit_flush && GEN9_READS_DST(op->u.gen9.flags));
1338 need_stall = sna->render_state.gen9.surface_table != wm_binding_table;
1344 need_stall &= gen9_emit_drawing_rectangle(sna, op);
1349 gen9_emit_pipe_invalidate(sna);
1350 kgem_clear_dirty(&sna->kgem);
1358 gen9_emit_pipe_flush(sna, need_stall);
1362 gen9_emit_pipe_stall(sna);
1364 gen9_emit_cc(sna, GEN9_BLEND(op->u.gen9.flags));
1365 gen9_emit_sampler(sna, GEN9_SAMPLER(op->u.gen9.flags));
1366 gen9_emit_sf(sna, GEN9_VERTEX(op->u.gen9.flags) >> 2);
1367 gen9_emit_wm(sna, op->u.gen9.wm_kernel);
1368 gen9_emit_vertex_elements(sna, op);
1369 gen9_emit_binding_table(sna, wm_binding_table);
1371 sna->render_state.gen9.emit_flush = GEN9_READS_DST(op->u.gen9.flags);
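gen9_emit_state() (hits 1321-1371) reissues only the state that changed and picks the cheapest pipe control that is still safe: a full invalidate when an input bo is dirty, a render-target flush when the previous op sampled its destination, otherwise at most a stall when the binding table moved. A schematic of that ordering under those simplified triggers; cached_state and the emit_pipe_*() helpers are stand-ins, not the driver's emitters:

#include <stdbool.h>

struct cached_state { bool emit_flush; unsigned surface_table; };

static void emit_pipe_invalidate(void) { /* PIPE_CONTROL: full flush + cache invalidate */ }
static void emit_pipe_flush(bool stall) { (void)stall; /* PIPE_CONTROL: render-target flush */ }
static void emit_pipe_stall(void)       { /* PIPE_CONTROL: CS stall only */ }

void emit_state(struct cached_state *c, bool reads_dst, bool inputs_dirty,
		unsigned wm_binding_table)
{
	/* emit_flush was set by the previous op if it read its destination. */
	bool need_flush = c->emit_flush && reads_dst;
	/* A new binding table implies new surface state: stall at minimum. */
	bool need_stall = c->surface_table != wm_binding_table;

	if (inputs_dirty) {			/* e.g. a source written since last use */
		emit_pipe_invalidate();		/* strongest barrier subsumes the rest */
		need_flush = need_stall = false;
	}
	if (need_flush) {
		emit_pipe_flush(need_stall);
		need_stall = false;
	}
	if (need_stall)
		emit_pipe_stall();

	/* ...then cc, sampler, sf, wm, vertex elements and the binding table,
	 * exactly as in hits 1364-1369... */
	c->surface_table = wm_binding_table;
	c->emit_flush = reads_dst;		/* remembered for the next op */
}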
1374 static bool gen9_magic_ca_pass(struct sna *sna,
1377 struct gen9_render_state *state = &sna->render_state.gen9;
1383 sna->render.vertex_start, sna->render.vertex_index));
1385 gen9_emit_pipe_stall(sna);
1387 gen9_emit_cc(sna,
1390 gen9_emit_wm(sna,
1397 OUT_BATCH(sna->render.vertex_index - sna->render.vertex_start);
1398 OUT_BATCH(sna->render.vertex_start);
1403 state->last_primitive = sna->kgem.nbatch;
1500 gen9_bind_bo(struct sna *sna,
1517 assert(offset >= sna->kgem.surface);
1521 offset = sna->kgem.surface -= SURFACE_DW;
1522 ss = sna->kgem.batch + offset;
1532 ss[1] = (is_scanout || (is_dst && is_uncached(sna, bo))) ? MOCS_PTE << 24 : MOCS_WB << 24;
1540 *(uint64_t *)(ss+8) = kgem_add_reloc64(&sna->kgem, offset + 8, bo, domains, 0);
1558 static void gen9_emit_vertex_buffer(struct sna *sna,
1566 sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
1570 sna->render.vb_id |= 1 << id;
1573 static void gen9_emit_primitive(struct sna *sna)
1575 if (sna->kgem.nbatch == sna->render_state.gen9.last_primitive) {
1576 sna->render.vertex_offset = sna->kgem.nbatch - 5;
1582 sna->render.vertex_offset = sna->kgem.nbatch;
1584 OUT_BATCH(sna->render.vertex_index);
1588 sna->render.vertex_start = sna->render.vertex_index;
1590 sna->render_state.gen9.last_primitive = sna->kgem.nbatch;
1591 sna->render_state.gen9.ve_dirty = false;
1594 static bool gen9_rectangle_begin(struct sna *sna,
1600 if (sna_vertex_wait__locked(&sna->render) && sna->render.vertex_offset)
1604 if ((sna->render.vb_id & id) == 0)
1606 if (!kgem_check_batch(&sna->kgem, ndwords))
1609 if ((sna->render.vb_id & id) == 0)
1610 gen9_emit_vertex_buffer(sna, op);
1612 gen9_emit_primitive(sna);
1616 static int gen9_get_rectangles__flush(struct sna *sna,
1620 if (sna_vertex_wait__locked(&sna->render)) {
1621 int rem = vertex_space(sna);
1626 if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 6))
1628 if (!kgem_check_reloc_and_exec(&sna->kgem, 2))
1631 if (sna->render.vertex_offset) {
1632 gen8_vertex_flush(sna);
1633 if (gen9_magic_ca_pass(sna, op)) {
1634 gen9_emit_pipe_invalidate(sna);
1635 gen9_emit_cc(sna, GEN9_BLEND(op->u.gen9.flags));
1636 gen9_emit_wm(sna, op->u.gen9.wm_kernel);
1640 return gen8_vertex_finish(sna);
1643 inline static int gen9_get_rectangles(struct sna *sna,
1646 void (*emit_state)(struct sna *sna, const struct sna_composite_op *op))
1653 rem = vertex_space(sna);
1657 rem = gen9_get_rectangles__flush(sna, op);
1662 if (unlikely(sna->render.vertex_offset == 0)) {
1663 if (!gen9_rectangle_begin(sna, op))
1669 assert(rem <= vertex_space(sna));
1675 sna->render.vertex_index += 3*want;
1679 if (sna->render.vertex_offset) {
1680 gen8_vertex_flush(sna);
1681 gen9_magic_ca_pass(sna, op);
1683 sna_vertex_wait__locked(&sna->render);
1684 _kgem_submit(&sna->kgem);
1685 emit_state(sna, op);
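gen9_get_rectangles() (hits 1643-1685) reserves vertex space for a number of rectangles and, when either the vertex buffer or the batch runs out, flushes what is queued, submits, re-emits the operation's state through the emit_state callback and retries. A driver-independent sketch of that reserve/flush/retry loop; struct ctx and the four helpers are hypothetical:

struct ctx;					/* opaque stand-in for struct sna */
typedef void (*emit_state_fn)(struct ctx *);

extern int  vertex_space_left(struct ctx *);	/* free floats in the vbo      */
extern int  flush_and_make_room(struct ctx *);	/* new free space, 0 = failure */
extern int  begin_rectangles(struct ctx *);	/* open vb + 3DPRIMITIVE if needed */
extern void submit_batch(struct ctx *);

int get_rectangles(struct ctx *c, int want, int floats_per_rect,
		   emit_state_fn emit_state)
{
	for (;;) {
		int rem = vertex_space_left(c);

		if (rem < floats_per_rect)
			rem = flush_and_make_room(c);

		if (rem >= floats_per_rect && begin_rectangles(c)) {
			/* Clamp to however many rectangles still fit. */
			if (want * floats_per_rect > rem)
				want = rem / floats_per_rect;
			return want;
		}

		/* Vertex buffer or batch exhausted: submit the batch and
		 * re-emit the op's state before trying again. */
		submit_batch(c);
		emit_state(c);
	}
}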
1689 inline static uint32_t *gen9_composite_get_binding_table(struct sna *sna,
1694 assert(sna->kgem.surface <= 16384);
1695 sna->kgem.surface -= SURFACE_DW;
1697 table = memset(sna->kgem.batch + sna->kgem.surface, 0, 64);
1699 DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));
1701 *offset = sna->kgem.surface;
1706 gen9_get_batch(struct sna *sna, const struct sna_composite_op *op)
1708 kgem_set_mode(&sna->kgem, KGEM_RENDER, op->dst.bo);
1710 if (!kgem_check_batch_with_surfaces(&sna->kgem, 150, 2*(1+3))) {
1712 __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
1714 _kgem_submit(&sna->kgem);
1715 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
1718 assert(sna->kgem.mode == KGEM_RENDER);
1719 assert(sna->kgem.ring == KGEM_RENDER);
1721 if (sna->render_state.gen9.needs_invariant)
1722 gen9_emit_invariant(sna);
1725 static void gen9_emit_composite_state(struct sna *sna,
1731 gen9_get_batch(sna, op);
1733 binding_table = gen9_composite_get_binding_table(sna, &offset);
1738 gen9_bind_bo(sna,
1743 gen9_bind_bo(sna,
1749 gen9_bind_bo(sna,
1757 if (sna->kgem.surface == offset &&
1758 *(uint64_t *)(sna->kgem.batch + sna->render_state.gen9.surface_table) == *(uint64_t*)binding_table &&
1760 sna->kgem.batch[sna->render_state.gen9.surface_table+2] == binding_table[2])) {
1761 sna->kgem.surface += SURFACE_DW;
1762 offset = sna->render_state.gen9.surface_table;
1765 if (sna->kgem.batch[sna->render_state.gen9.surface_table] == binding_table[0])
1768 gen9_emit_state(sna, op, offset | dirty);
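gen9_emit_composite_state() ends (hits 1757-1768) by comparing the binding table it just built against the previous one: surface state is allocated downward from the end of the batch (kgem.surface -= SURFACE_DW), so if the new table is identical the allocation is simply handed back (surface += SURFACE_DW) and the old offset reused, while the low bit of the offset passed to gen9_emit_state() doubles as a dirty/flush hint. The copy and fill state emitters (2859-2869, 3281-3290) do the same. A self-contained sketch of the dedupe, with hypothetical names:

#include <stdint.h>
#include <string.h>

#define SURFACE_DW 16	/* assumed size of one binding/surface-table slot, in dwords */

/* Hypothetical batch: commands grow up from 0, surfaces grow down from the top. */
struct batch {
	uint32_t data[16384];
	unsigned surface;	/* offset of the most recent downward allocation */
	unsigned prev_table;	/* offset of the previously emitted binding table */
};

unsigned bind_table(struct batch *b, const uint32_t *table, unsigned n)
{
	unsigned offset = b->surface;	/* the new table was just written here */

	if (memcmp(b->data + b->prev_table, table, n * sizeof(uint32_t)) == 0) {
		/* Identical to last time: release the slot and point the
		 * binding-table state at the previous copy instead. */
		b->surface += SURFACE_DW;
		return b->prev_table;
	}

	b->prev_table = offset;
	return offset;
}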
1772 gen9_align_vertex(struct sna *sna, const struct sna_composite_op *op)
1774 if (op->floats_per_vertex != sna->render_state.gen9.floats_per_vertex) {
1776 sna->render_state.gen9.floats_per_vertex, op->floats_per_vertex));
1777 gen8_vertex_align(sna, op);
1778 sna->render_state.gen9.floats_per_vertex = op->floats_per_vertex;
1783 gen9_render_composite_blt(struct sna *sna,
1787 gen9_get_rectangles(sna, op, 1, gen9_emit_composite_state);
1788 op->prim_emit(sna, op, r);
1792 gen9_render_composite_box(struct sna *sna,
1798 gen9_get_rectangles(sna, op, 1, gen9_emit_composite_state);
1810 op->prim_emit(sna, op, &r);
1814 gen9_render_composite_boxes__blt(struct sna *sna,
1823 nbox_this_time = gen9_get_rectangles(sna, op, nbox,
1840 op->prim_emit(sna, op, &r);
1847 gen9_render_composite_boxes(struct sna *sna,
1857 nbox_this_time = gen9_get_rectangles(sna, op, nbox,
1862 v = sna->render.vertices + sna->render.vertex_used;
1863 sna->render.vertex_used += nbox_this_time * op->floats_per_rect;
1871 gen9_render_composite_boxes__thread(struct sna *sna,
1877 sna_vertex_lock(&sna->render);
1882 nbox_this_time = gen9_get_rectangles(sna, op, nbox,
1887 v = sna->render.vertices + sna->render.vertex_used;
1888 sna->render.vertex_used += nbox_this_time * op->floats_per_rect;
1890 sna_vertex_acquire__locked(&sna->render);
1891 sna_vertex_unlock(&sna->render);
1896 sna_vertex_lock(&sna->render);
1897 sna_vertex_release__locked(&sna->render);
1899 sna_vertex_unlock(&sna->render);
1946 gen9_composite_picture(struct sna *sna,
1965 return gen4_channel_init_solid(sna, channel, color);
1971 return gen4_channel_init_linear(sna, picture, channel,
1979 ret = sna_render_picture_approximate_gradient(sna, picture, channel,
1982 ret = sna_render_picture_fixup(sna, picture, channel,
1989 return sna_render_picture_fixup(sna, picture, channel,
1994 return sna_render_picture_fixup(sna, picture, channel,
1998 return sna_render_picture_fixup(sna, picture, channel,
2027 return gen4_channel_init_solid(sna, channel, solid_color(picture->format, priv->clear_color));
2036 return sna_render_picture_convert(sna, picture, channel, pixmap,
2043 return sna_render_picture_extract(sna, picture, channel,
2047 return sna_render_pixmap_bo(sna, channel, pixmap,
2065 static void gen9_render_composite_done(struct sna *sna,
2068 if (sna->render.vertex_offset) {
2069 gen8_vertex_flush(sna);
2070 gen9_magic_ca_pass(sna, op);
2074 kgem_bo_destroy(&sna->kgem, op->mask.bo);
2076 kgem_bo_destroy(&sna->kgem, op->src.bo);
2078 sna_render_composite_redirect_done(sna, op);
2082 gen9_composite_set_target(struct sna *sna,
2108 if (!need_tiling(sna, op->dst.width, op->dst.height))
2127 kgem_bo_pair_undo(&sna->kgem, priv->gpu_bo, priv->cpu_bo);
2144 !sna_render_composite_redirect(sna, op, x, y, w, h, partial))
2151 try_blt(struct sna *sna,
2165 if (sna->kgem.mode == KGEM_BLT) {
2193 if (sna_picture_is_solid(src, NULL) && can_switch_to_blt(sna, bo, 0))
2197 (sna->render_state.gt < 3 || width*height < 1024) &&
2198 can_switch_to_blt(sna, bo, 0))
2206 if (prefer_blt_bo(sna, s, bo))
2210 if (sna->kgem.ring == KGEM_BLT) {
2218 return sna_blt_composite(sna, op,
2288 gen9_composite_fallback(struct sna *sna,
2379 reuse_source(struct sna *sna,
2396 return gen4_channel_init_solid(sna, mc, color);
2434 gen9_render_composite(struct sna *sna,
2450 width, height, sna->kgem.mode, sna->kgem.ring));
2453 try_blt(sna, op,
2462 if (gen9_composite_fallback(sna, src, mask, dst))
2465 if (need_tiling(sna, width, height))
2473 if (op == PictOpClear && src == sna->clear)
2476 if (!gen9_composite_set_target(sna, tmp, dst,
2481 switch (gen9_composite_picture(sna, src, &tmp->src,
2489 if (!gen4_channel_init_solid(sna, &tmp->src, 0))
2495 (prefer_blt_composite(sna, tmp) ||
2497 sna_blt_composite__convert(sna,
2534 if (!reuse_source(sna,
2537 switch (gen9_composite_picture(sna, mask, &tmp->mask,
2545 if (!gen4_channel_init_solid(sna, &tmp->mask, 0))
2566 gen4_choose_composite_emitter(sna, tmp));
2581 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->dst.bo);
2582 if (!kgem_check_bo(&sna->kgem,
2585 kgem_submit(&sna->kgem);
2586 if (!kgem_check_bo(&sna->kgem,
2590 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
2593 gen9_align_vertex(sna, tmp);
2594 gen9_emit_composite_state(sna, tmp);
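Before emitting, every operation runs the same reservation idiom (here for composite at 2581-2594, and again for spans, copy, fill, fill_one, clear and video): switch kgem into RENDER mode for the destination, check that all the bos fit in the current batch, and if they do not, submit the batch and re-check against an empty one before falling back. A generic sketch of the idiom; the types and helpers below are hypothetical, not the kgem API:

#include <stdbool.h>

struct batch_mgr;	/* stand-in for struct kgem */
struct bo;

extern void set_mode_render(struct batch_mgr *, struct bo *dst);
extern bool fits_in_batch(struct batch_mgr *, struct bo *dst, struct bo *src);
extern void submit(struct batch_mgr *);

/* Returns false only if the bos cannot fit even in an empty batch; the
 * callers above then destroy their temporary bos and fall back to the
 * BLT or software paths. */
bool reserve_batch_space(struct batch_mgr *kgem, struct bo *dst, struct bo *src)
{
	set_mode_render(kgem, dst);
	if (!fits_in_batch(kgem, dst, src)) {
		submit(kgem);			/* flush whatever is queued */
		if (!fits_in_batch(kgem, dst, src))
			return false;		/* hopeless even when empty */
		/* submission reset the ring; the real code re-selects RENDER here */
	}
	return true;
}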
2599 kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
2604 kgem_bo_destroy(&sna->kgem, tmp->src.bo);
2609 kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
2614 sna_blt_composite(sna, op,
2624 gen9_render_composite_spans_box(struct sna *sna,
2637 gen9_get_rectangles(sna, &op->base, 1, gen9_emit_composite_state);
2638 op->prim_emit(sna, op, box, opacity);
2642 gen9_render_composite_spans_boxes(struct sna *sna,
2656 nbox_this_time = gen9_get_rectangles(sna, &op->base, nbox,
2666 op->prim_emit(sna, op, box++, opacity);
2672 gen9_render_composite_spans_boxes__thread(struct sna *sna,
2682 sna_vertex_lock(&sna->render);
2687 nbox_this_time = gen9_get_rectangles(sna, &op->base, nbox,
2692 v = sna->render.vertices + sna->render.vertex_used;
2693 sna->render.vertex_used += nbox_this_time * op->base.floats_per_rect;
2695 sna_vertex_acquire__locked(&sna->render);
2696 sna_vertex_unlock(&sna->render);
2701 sna_vertex_lock(&sna->render);
2702 sna_vertex_release__locked(&sna->render);
2704 sna_vertex_unlock(&sna->render);
2708 gen9_render_composite_spans_done(struct sna *sna,
2711 if (sna->render.vertex_offset)
2712 gen8_vertex_flush(sna);
2717 kgem_bo_destroy(&sna->kgem, op->base.src.bo);
2719 sna_render_composite_redirect_done(sna, &op->base);
2723 gen9_check_composite_spans(struct sna *sna,
2730 if (gen9_composite_fallback(sna, src, NULL, dst))
2733 if (need_tiling(sna, width, height) &&
2734 !is_gpu(sna, dst->pDrawable, PREFER_GPU_SPANS)) {
2744 gen9_render_composite_spans(struct sna *sna,
2755 width, height, flags, sna->kgem.ring));
2757 assert(gen9_check_composite_spans(sna, op, src, dst, width, height, flags));
2759 if (need_tiling(sna, width, height)) {
2768 if (!gen9_composite_set_target(sna, &tmp->base, dst,
2772 switch (gen9_composite_picture(sna, src, &tmp->base.src,
2780 if (!gen4_channel_init_solid(sna, &tmp->base.src, 0))
2799 gen4_choose_spans_emitter(sna, tmp));
2809 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->base.dst.bo);
2810 if (!kgem_check_bo(&sna->kgem,
2813 kgem_submit(&sna->kgem);
2814 if (!kgem_check_bo(&sna->kgem,
2818 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
2821 gen9_align_vertex(sna, &tmp->base);
2822 gen9_emit_composite_state(sna, &tmp->base);
2827 kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
2830 kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
2836 gen9_emit_copy_state(struct sna *sna,
2842 gen9_get_batch(sna, op);
2844 binding_table = gen9_composite_get_binding_table(sna, &offset);
2849 gen9_bind_bo(sna,
2854 gen9_bind_bo(sna,
2859 if (sna->kgem.surface == offset &&
2860 *(uint64_t *)(sna->kgem.batch + sna->render_state.gen9.surface_table) == *(uint64_t*)binding_table) {
2861 sna->kgem.surface += SURFACE_DW;
2862 offset = sna->render_state.gen9.surface_table;
2865 if (sna->kgem.batch[sna->render_state.gen9.surface_table] == binding_table[0])
2869 gen9_emit_state(sna, op, offset | dirty);
2873 prefer_blt_copy(struct sna *sna,
2878 if (sna->kgem.mode == KGEM_BLT)
2887 if (flags & COPY_DRI && !sna->kgem.has_semaphores)
2890 if (force_blt_ring(sna, dst_bo))
2894 (sna->render_state.gt < 3 && src_bo == dst_bo)) &&
2895 can_switch_to_blt(sna, dst_bo, flags))
2903 sna->render_state.gt < 3 &&
2904 can_switch_to_blt(sna, dst_bo, flags))
2907 if (prefer_render_ring(sna, dst_bo))
2910 if (!prefer_blt_ring(sna, dst_bo, flags))
2913 return prefer_blt_bo(sna, src_bo, dst_bo);
2917 gen9_render_copy_boxes(struct sna *sna, uint8_t alu,
2928 overlaps(sna,
2933 if (prefer_blt_copy(sna, src_bo, dst_bo, flags) &&
2935 sna_blt_copy_boxes(sna, alu,
2950 return sna_blt_copy_boxes_fallback(sna, alu,
2956 if (overlaps(sna,
2963 if ((big || !prefer_render_ring(sna, dst_bo)) &&
2964 sna_blt_copy_boxes(sna, alu,
2978 return sna_render_copy_boxes__overlap(sna, alu, dst, dst_bo,
3018 if (!sna_render_composite_redirect(sna, &tmp,
3044 if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
3066 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp.dst.bo);
3067 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
3068 kgem_submit(&sna->kgem);
3069 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
3071 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3073 kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
3076 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3087 gen9_align_vertex(sna, &tmp);
3088 gen9_emit_copy_state(sna, &tmp);
3094 n_this_time = gen9_get_rectangles(sna, &tmp, n,
3098 v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
3099 sna->render.vertex_used += 6 * n_this_time;
3100 assert(sna->render.vertex_used <= sna->render.vertex_size);
3119 gen8_vertex_flush(sna);
3120 sna_render_composite_redirect_done(sna, &tmp);
3122 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3127 kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
3131 sna_blt_copy_boxes(sna, alu,
3138 return sna_tiling_copy_boxes(sna, alu,
3145 gen9_render_copy_blt(struct sna *sna,
3153 gen9_get_rectangles(sna, &op->base, 1, gen9_emit_copy_state);
3155 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3156 sna->render.vertex_used += 6;
3157 assert(sna->render.vertex_used <= sna->render.vertex_size);
3168 gen9_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
3170 if (sna->render.vertex_offset)
3171 gen8_vertex_flush(sna);
3175 gen9_render_copy(struct sna *sna, uint8_t alu,
3185 if (prefer_blt_copy(sna, src_bo, dst_bo, 0) &&
3187 sna_blt_copy(sna, alu,
3202 return sna_blt_copy(sna, alu, src_bo, dst_bo,
3236 kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
3237 if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
3238 kgem_submit(&sna->kgem);
3239 if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
3241 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3244 gen9_align_vertex(sna, &op->base);
3245 gen9_emit_copy_state(sna, &op->base);
3253 gen9_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
3264 gen9_get_batch(sna, op);
3266 binding_table = gen9_composite_get_binding_table(sna, &offset);
3271 gen9_bind_bo(sna,
3276 gen9_bind_bo(sna,
3281 if (sna->kgem.surface == offset &&
3282 *(uint64_t *)(sna->kgem.batch + sna->render_state.gen9.surface_table) == *(uint64_t*)binding_table) {
3283 sna->kgem.surface += SURFACE_DW;
3284 offset = sna->render_state.gen9.surface_table;
3287 if (sna->kgem.batch[sna->render_state.gen9.surface_table] == binding_table[0])
3290 gen9_emit_state(sna, op, offset | dirty);
3294 gen9_render_fill_boxes(struct sna *sna,
3314 if (prefer_blt_fill(sna, dst_bo, FILL_BOXES) ||
3333 sna_blt_fill_boxes(sna, alu,
3370 if (!sna_render_composite_redirect(sna, &tmp,
3375 return sna_tiling_fill_boxes(sna, op, format, color,
3379 tmp.src.bo = sna_render_get_solid(sna, pixel);
3389 kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
3390 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3391 kgem_submit(&sna->kgem);
3392 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3393 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3397 kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
3403 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3406 gen9_align_vertex(sna, &tmp);
3407 gen9_emit_fill_state(sna, &tmp);
3413 n_this_time = gen9_get_rectangles(sna, &tmp, n,
3417 v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
3418 sna->render.vertex_used += 6 * n_this_time;
3419 assert(sna->render.vertex_used <= sna->render.vertex_size);
3434 gen8_vertex_flush(sna);
3435 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3436 sna_render_composite_redirect_done(sna, &tmp);
3441 gen9_render_fill_op_blt(struct sna *sna,
3449 gen9_get_rectangles(sna, &op->base, 1, gen9_emit_fill_state);
3451 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3452 sna->render.vertex_used += 6;
3453 assert(sna->render.vertex_used <= sna->render.vertex_size);
3465 gen9_render_fill_op_box(struct sna *sna,
3474 gen9_get_rectangles(sna, &op->base, 1, gen9_emit_fill_state);
3476 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3477 sna->render.vertex_used += 6;
3478 assert(sna->render.vertex_used <= sna->render.vertex_size);
3490 gen9_render_fill_op_boxes(struct sna *sna,
3502 nbox_this_time = gen9_get_rectangles(sna, &op->base, nbox,
3506 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3507 sna->render.vertex_used += 6 * nbox_this_time;
3508 assert(sna->render.vertex_used <= sna->render.vertex_size);
3523 gen9_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
3525 if (sna->render.vertex_offset)
3526 gen8_vertex_flush(sna);
3527 kgem_bo_destroy(&sna->kgem, op->base.src.bo);
3531 gen9_render_fill(struct sna *sna, uint8_t alu,
3538 if (prefer_blt_fill(sna, dst_bo, flags) &&
3539 sna_blt_fill(sna, alu,
3548 return sna_blt_fill(sna, alu,
3564 sna_render_get_solid(sna,
3576 kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
3577 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3578 kgem_submit(&sna->kgem);
3579 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3580 kgem_bo_destroy(&sna->kgem, op->base.src.bo);
3584 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3587 gen9_align_vertex(sna, &op->base);
3588 gen9_emit_fill_state(sna, &op->base);
3599 gen9_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
3611 return sna_blt_fill_boxes(sna, alu,
3617 gen9_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
3627 if (prefer_blt_fill(sna, bo, FILL_BOXES) &&
3628 gen9_render_fill_one_try_blt(sna, dst, bo, color,
3636 return gen9_render_fill_one_try_blt(sna, dst, bo, color,
3650 sna_render_get_solid(sna,
3662 kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
3663 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3664 kgem_submit(&sna->kgem);
3665 if (kgem_check_bo(&sna->kgem, bo, NULL)) {
3666 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3669 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3672 gen9_align_vertex(sna, &tmp);
3673 gen9_emit_fill_state(sna, &tmp);
3675 gen9_get_rectangles(sna, &tmp, 1, gen9_emit_fill_state);
3679 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3680 sna->render.vertex_used += 6;
3681 assert(sna->render.vertex_used <= sna->render.vertex_size);
3690 gen8_vertex_flush(sna);
3691 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3697 gen9_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
3706 return sna_blt_fill_boxes(sna, GXclear,
3712 gen9_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
3723 if (sna->kgem.mode == KGEM_BLT &&
3724 gen9_render_clear_try_blt(sna, dst, bo))
3730 return gen9_render_clear_try_blt(sna, dst, bo);
3739 tmp.src.bo = sna_render_get_solid(sna, 0);
3749 kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
3750 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3751 kgem_submit(&sna->kgem);
3752 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3753 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3756 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3759 gen9_align_vertex(sna, &tmp);
3760 gen9_emit_fill_state(sna, &tmp);
3762 gen9_get_rectangles(sna, &tmp, 1, gen9_emit_fill_state);
3764 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3765 sna->render.vertex_used += 6;
3766 assert(sna->render.vertex_used <= sna->render.vertex_size);
3776 gen8_vertex_flush(sna);
3777 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3783 static uint32_t gen9_bind_video_source(struct sna *sna,
3794 offset = sna->kgem.surface -= SURFACE_DW;
3795 ss = sna->kgem.batch + offset;
3809 kgem_add_reloc64(&sna->kgem, offset + 8, bo,
3826 static void gen9_emit_video_state(struct sna *sna,
3841 gen9_get_batch(sna, op);
3883 binding_table = gen9_composite_get_binding_table(sna, &offset);
3886 gen9_bind_bo(sna,
3892 gen9_bind_video_source(sna,
3901 gen9_emit_state(sna, op, offset);
3937 gen9_render_video(struct sna *sna,
4000 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp.dst.bo);
4001 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
4002 kgem_submit(&sna->kgem);
4003 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
4006 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
4009 gen9_align_vertex(sna, &tmp);
4010 gen9_emit_video_state(sna, &tmp);
4042 gen9_get_rectangles(sna, &tmp, 1, gen9_emit_video_state);
4058 gen8_vertex_flush(sna);
4067 static void gen9_render_flush(struct sna *sna)
4069 gen8_vertex_close(sna);
4071 assert(sna->render.vb_id == 0);
4072 assert(sna->render.vertex_offset == 0);
4075 static void gen9_render_reset(struct sna *sna)
4077 sna->render_state.gen9.emit_flush = false;
4078 sna->render_state.gen9.needs_invariant = true;
4079 sna->render_state.gen9.ve_id = 3 << 2;
4080 sna->render_state.gen9.ve_dirty = false;
4081 sna->render_state.gen9.last_primitive = -1;
4083 sna->render_state.gen9.num_sf_outputs = 0;
4084 sna->render_state.gen9.samplers = -1;
4085 sna->render_state.gen9.blend = -1;
4086 sna->render_state.gen9.kernel = -1;
4087 sna->render_state.gen9.drawrect_offset = -1;
4088 sna->render_state.gen9.drawrect_limit = -1;
4089 sna->render_state.gen9.surface_table = 0;
4091 if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
4093 discard_vbo(sna);
4096 sna->render.vertex_offset = 0;
4097 sna->render.nvertex_reloc = 0;
4098 sna->render.vb_id = 0;
4101 static void gen9_render_fini(struct sna *sna)
4103 kgem_bo_destroy(&sna->kgem, sna->render_state.gen9.general_bo);
4106 static bool gen9_render_setup(struct sna *sna)
4108 struct gen9_render_state *state = &sna->render_state.gen9;
4114 devid = intel_get_device_id(sna->dev);
4120 if (is_skl(sna))
4122 if (is_bxt(sna))
4124 if (is_kbl(sna))
4126 if (is_glk(sna))
4128 if (is_cfl(sna))
4148 sna_static_stream_compile_wm(sna, &general,
4154 sna_static_stream_compile_wm(sna, &general,
4160 sna_static_stream_compile_wm(sna, &general,
4193 state->general_bo = sna_static_stream_fini(sna, &general);
4197 const char *gen9_render_init(struct sna *sna, const char *backend)
4199 if (!gen9_render_setup(sna))
4202 sna->kgem.context_switch = gen6_render_context_switch;
4203 sna->kgem.retire = gen6_render_retire;
4204 sna->kgem.expire = gen4_render_expire;
4207 sna->render.composite = gen9_render_composite;
4208 sna->render.prefer_gpu |= PREFER_GPU_RENDER;
4211 sna->render.check_composite_spans = gen9_check_composite_spans;
4212 sna->render.composite_spans = gen9_render_composite_spans;
4213 sna->render.prefer_gpu |= PREFER_GPU_SPANS;
4216 sna->render.video = gen9_render_video;
4220 sna->render.copy_boxes = gen9_render_copy_boxes;
4223 sna->render.copy = gen9_render_copy;
4227 sna->render.fill_boxes = gen9_render_fill_boxes;
4230 sna->render.fill = gen9_render_fill;
4233 sna->render.fill_one = gen9_render_fill_one;
4236 sna->render.clear = gen9_render_clear;
4239 sna->render.flush = gen9_render_flush;
4240 sna->render.reset = gen9_render_reset;
4241 sna->render.fini = gen9_render_fini;
4243 sna->render.max_3d_size = GEN9_MAX_SIZE;
4244 sna->render.max_3d_pitch = 1 << 18;
4245 return sna->render_state.gen9.info->name;
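gen9_render_init() (hits 4197-4245) installs the gen9 entry points in sna->render (composite, spans, video, copy, fill, flush, reset, fini) only if gen9_render_setup() managed to build the static kernels; otherwise the caller keeps whatever backend was already in place. A condensed sketch of that pattern, assuming the failure path returns the backend name it was handed; the names below are stand-ins, and the real function returns render_state.gen9.info->name rather than a literal:

struct ctx;				/* stand-in for struct sna */

extern int  gen9_setup(struct ctx *);	/* stand-in for gen9_render_setup() */
extern void gen9_install_vtable(struct ctx *);

const char *gen9_init(struct ctx *c, const char *backend)
{
	if (!gen9_setup(c))
		return backend;		/* keep the already-installed fallback */

	gen9_install_vtable(c);		/* composite, copy, fill, video, ... */
	return "gen9";			/* placeholder for info->name */
}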