Lines Matching refs:sna (cross-reference hits for the identifier sna in the gen7 SNA render backend; each entry is the source line number followed by the matching line)
25 * Wang Zhenyu <zhenyu.z.wang@sna.com>
37 #include "sna.h"
181 inline static bool is_ivb(struct sna *sna)
183 return sna->kgem.gen == 070;
186 inline static bool is_byt(struct sna *sna)
188 return sna->kgem.gen == 071;
191 inline static bool is_hsw(struct sna *sna)
193 return sna->kgem.gen == 075;
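
The three gen checks above compare kgem.gen against octal constants: 070 is gen 7.0 (Ivybridge), 071 Baytrail and 075 gen 7.5 (Haswell). A minimal sketch of that encoding, using simplified stand-in structs rather than the real sna/kgem definitions:

/* Minimal sketch, assuming simplified stand-in structs rather than the real
 * sna/kgem definitions.  kgem.gen encodes "major.minor" so that the octal
 * literal reads naturally: 070 = gen 7.0 (Ivybridge), 071 = Baytrail,
 * 075 = gen 7.5 (Haswell). */
#include <stdbool.h>

struct kgem { unsigned gen; };
struct sna { struct kgem kgem; };

static inline bool is_ivb(const struct sna *sna) { return sna->kgem.gen == 070; }
static inline bool is_byt(const struct sna *sna) { return sna->kgem.gen == 071; }
static inline bool is_hsw(const struct sna *sna) { return sna->kgem.gen == 075; }
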
338 #define OUT_BATCH(v) batch_emit(sna, v)
339 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
340 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
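
These macros funnel all emission through batch_emit and vertex_emit helpers that are not among the hits above. A hedged sketch of what such helpers plausibly do, with toy fixed-size buffers standing in for the real kgem batch and vertex buffer; the int16_t packing mirrors the casts visible later in the copy and fill emitters:

/* Hedged sketch of the helpers behind OUT_BATCH/OUT_VERTEX (assumption: toy
 * fixed-size buffers instead of the real kgem batch and vertex buffer). */
#include <stdint.h>

struct kgem { uint32_t batch[1024]; int nbatch; };
struct sna_render { float vertices[1024]; int vertex_used; };
struct sna { struct kgem kgem; struct sna_render render; };

static inline void batch_emit(struct sna *sna, uint32_t dword)
{
	sna->kgem.batch[sna->kgem.nbatch++] = dword;     /* append one command dword */
}

static inline void vertex_emit(struct sna *sna, float v)
{
	sna->render.vertices[sna->render.vertex_used++] = v;   /* one float vertex component */
}

static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
	/* two signed 16-bit coordinates share one 32-bit vertex slot, matching the
	 * (int16_t *)&sna->render.vertices[...] casts seen in the copy/fill emitters */
	int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
	v[0] = x;
	v[1] = y;
}
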
537 gen7_emit_urb(struct sna *sna)
540 OUT_BATCH(sna->render_state.gen7.info->urb.push_ps_size);
544 OUT_BATCH((sna->render_state.gen7.info->urb.max_vs_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
562 gen7_emit_state_base_address(struct sna *sna)
564 uint32_t mocs = sna->render_state.gen7.info->mocs << 8;
568 OUT_BATCH(kgem_add_reloc(&sna->kgem, /* surface */
569 sna->kgem.nbatch,
573 OUT_BATCH(kgem_add_reloc(&sna->kgem, /* dynamic */
574 sna->kgem.nbatch,
575 sna->render_state.gen7.general_bo,
579 OUT_BATCH(kgem_add_reloc(&sna->kgem, /* instruction */
580 sna->kgem.nbatch,
581 sna->render_state.gen7.general_bo,
593 gen7_disable_vs(struct sna *sna)
629 gen7_disable_hs(struct sna *sna)
657 gen7_disable_te(struct sna *sna)
666 gen7_disable_ds(struct sna *sna)
693 gen7_disable_gs(struct sna *sna)
721 gen7_disable_streamout(struct sna *sna)
729 gen7_emit_sf_invariant(struct sna *sna)
741 gen7_emit_cc_invariant(struct sna *sna)
757 gen7_disable_clip(struct sna *sna)
769 gen7_emit_wm_invariant(struct sna *sna)
789 gen7_emit_null_depth_buffer(struct sna *sna)
808 gen7_emit_invariant(struct sna *sna)
821 gen7_emit_urb(sna);
823 gen7_emit_state_base_address(sna);
825 gen7_disable_vs(sna);
826 gen7_disable_hs(sna);
827 gen7_disable_te(sna);
828 gen7_disable_ds(sna);
829 gen7_disable_gs(sna);
830 gen7_disable_clip(sna);
831 gen7_emit_sf_invariant(sna);
832 gen7_emit_wm_invariant(sna);
833 gen7_emit_cc_invariant(sna);
834 gen7_disable_streamout(sna);
835 gen7_emit_null_depth_buffer(sna);
837 sna->render_state.gen7.needs_invariant = false;
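
gen7_emit_invariant programs the per-batch constant state (URB, state base address, the disabled VS/HS/TE/DS/GS/clip/streamout stages, a null depth buffer) exactly once, gated by needs_invariant; gen7_get_batch re-emits it when the flag is set and gen7_render_reset re-arms it. A minimal sketch of that lazy pattern, with a trimmed-down state struct:

/* Minimal sketch of the lazy invariant emission (assumption: trimmed state
 * struct; the real gen7_emit_invariant programs the URB, STATE_BASE_ADDRESS,
 * the disabled shader stages and a null depth buffer). */
#include <stdbool.h>

struct gen7_render_state { bool needs_invariant; };
struct sna { struct { struct gen7_render_state gen7; } render_state; };

static void gen7_emit_invariant_sketch(struct sna *sna)
{
	/* ...emit the per-batch constant pipeline setup... */
	sna->render_state.gen7.needs_invariant = false;   /* done for this batch */
}

static void gen7_get_batch_sketch(struct sna *sna)
{
	if (sna->render_state.gen7.needs_invariant)       /* first 3D op since reset? */
		gen7_emit_invariant_sketch(sna);
}

static void gen7_render_reset_sketch(struct sna *sna)
{
	sna->render_state.gen7.needs_invariant = true;    /* next batch re-emits it */
}
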
841 gen7_emit_cc(struct sna *sna, uint32_t blend_offset)
843 struct gen7_render_state *render = &sna->render_state.gen7;
862 gen7_emit_sampler(struct sna *sna, uint32_t state)
864 if (sna->render_state.gen7.samplers == state)
867 sna->render_state.gen7.samplers = state;
871 assert (is_aligned(sna->render_state.gen7.wm_state + state, 32));
873 OUT_BATCH(sna->render_state.gen7.wm_state + state);
877 gen7_emit_sf(struct sna *sna, bool has_mask)
881 if (sna->render_state.gen7.num_sf_outputs == num_sf_outputs)
887 sna->render_state.gen7.num_sf_outputs = num_sf_outputs;
908 gen7_emit_wm(struct sna *sna, int kernel)
912 if (sna->render_state.gen7.kernel == kernel)
915 sna->render_state.gen7.kernel = kernel;
916 kernels = sna->render_state.gen7.wm_kernel[kernel];
929 OUT_BATCH(sna->render_state.gen7.info->max_wm_threads |
942 gen7_emit_binding_table(struct sna *sna, uint16_t offset)
944 if (sna->render_state.gen7.surface_table == offset)
952 sna->render_state.gen7.surface_table = offset;
957 gen7_emit_drawing_rectangle(struct sna *sna,
966 if (sna->render_state.gen7.drawrect_limit == limit &&
967 sna->render_state.gen7.drawrect_offset == offset)
970 sna->render_state.gen7.drawrect_offset = offset;
971 sna->render_state.gen7.drawrect_limit = limit;
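
The emitters above all follow the same caching idiom: compare the requested value against the last value programmed into the hardware and return early when nothing changed (samplers, num_sf_outputs, kernel, drawrect offset/limit, surface_table). A sketch of that idiom, with a simplified state struct and a stub packet writer:

/* Sketch of the caching idiom (assumptions: simplified state struct, stub
 * packet writer; the real code applies the same test to num_sf_outputs,
 * kernel, drawrect_offset/limit and surface_table). */
#include <stdint.h>

struct gen7_render_state { uint32_t samplers; };
struct sna { struct { struct gen7_render_state gen7; } render_state; };

static void emit_sampler_state_pointers(struct sna *sna, uint32_t state)
{
	(void)sna; (void)state;        /* stand-in for the real packet emission */
}

static void gen7_emit_sampler_sketch(struct sna *sna, uint32_t state)
{
	if (sna->render_state.gen7.samplers == state)
		return;                                /* hardware already has it: skip */

	sna->render_state.gen7.samplers = state;       /* remember what was programmed */
	emit_sampler_state_pointers(sna, state);
}
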
981 gen7_emit_vertex_elements(struct sna *sna,
990 struct gen7_render_state *render = &sna->render_state.gen7;
1101 gen7_emit_pipe_invalidate(struct sna *sna)
1109 sna->render_state.gen7.pipe_controls_since_stall = 0;
1113 gen7_emit_pipe_flush(struct sna *sna, bool need_stall)
1120 sna->render_state.gen7.pipe_controls_since_stall = 0;
1122 sna->render_state.gen7.pipe_controls_since_stall++;
1131 gen7_emit_pipe_stall(struct sna *sna)
1138 sna->render_state.gen7.pipe_controls_since_stall = 0;
1142 gen7_emit_state(struct sna *sna,
1153 (sna->render_state.gen7.emit_flush && GEN7_READS_DST(op->u.gen7.flags));
1159 need_stall = sna->render_state.gen7.surface_table != wm_binding_table;
1165 need_stall &= gen7_emit_drawing_rectangle(sna, op);
1168 if (sna->kgem.gen < 075 &&
1169 sna->render_state.gen7.pipe_controls_since_stall >= 3)
1173 gen7_emit_pipe_invalidate(sna);
1174 kgem_clear_dirty(&sna->kgem);
1182 gen7_emit_pipe_flush(sna, need_stall);
1186 gen7_emit_pipe_stall(sna);
1188 gen7_emit_cc(sna, GEN7_BLEND(op->u.gen7.flags));
1189 gen7_emit_sampler(sna, GEN7_SAMPLER(op->u.gen7.flags));
1190 gen7_emit_sf(sna, GEN7_VERTEX(op->u.gen7.flags) >> 2);
1191 gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
1192 gen7_emit_vertex_elements(sna, op);
1193 gen7_emit_binding_table(sna, wm_binding_table);
1195 sna->render_state.gen7.emit_flush = GEN7_READS_DST(op->u.gen7.flags);
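
gen7_emit_state counts PIPE_CONTROLs issued since the last stalling one and, on pre-Haswell parts (gen < 075), forces a stall once three have gone by, before re-emitting cc, samplers, SF, the WM kernel, vertex elements and the binding table. A sketch of just that bookkeeping; need_forced_stall is a hypothetical helper, and the flush/invalidate decisions around it are only partially visible in the hits above:

/* Sketch of the PIPE_CONTROL bookkeeping only (assumptions: simplified
 * structs; need_forced_stall is a hypothetical helper, and the surrounding
 * flush/invalidate logic is only partially visible in the listing). */
#include <stdbool.h>

struct gen7_render_state { int pipe_controls_since_stall; };
struct sna {
	struct { unsigned gen; } kgem;
	struct { struct gen7_render_state gen7; } render_state;
};

static void gen7_emit_pipe_flush_sketch(struct sna *sna, bool need_stall)
{
	/* ...emit PIPE_CONTROL with a render-target flush, stalling if asked... */
	if (need_stall)
		sna->render_state.gen7.pipe_controls_since_stall = 0;
	else
		sna->render_state.gen7.pipe_controls_since_stall++;
}

static void gen7_emit_pipe_stall_sketch(struct sna *sna)
{
	/* ...emit a CS-stalling PIPE_CONTROL... */
	sna->render_state.gen7.pipe_controls_since_stall = 0;
}

static bool need_forced_stall(const struct sna *sna)
{
	/* Ivybridge/Baytrail workaround: after several non-stalling PIPE_CONTROLs
	 * force a stall; Haswell (gen 075) does not need it. */
	return sna->kgem.gen < 075 &&
	       sna->render_state.gen7.pipe_controls_since_stall >= 3;
}
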
1198 static bool gen7_magic_ca_pass(struct sna *sna,
1201 struct gen7_render_state *state = &sna->render_state.gen7;
1207 sna->render.vertex_start, sna->render.vertex_index));
1209 gen7_emit_pipe_stall(sna);
1211 gen7_emit_cc(sna,
1214 gen7_emit_wm(sna,
1221 OUT_BATCH(sna->render.vertex_index - sna->render.vertex_start);
1222 OUT_BATCH(sna->render.vertex_start);
1227 state->last_primitive = sna->kgem.nbatch;
1319 gen7_bind_bo(struct sna *sna,
1336 assert(offset >= sna->kgem.surface);
1342 offset = sna->kgem.surface -=
1344 ss = sna->kgem.batch + offset;
1355 ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
1360 ss[5] = (is_scanout || bo->io) ? 0 : sna->render_state.gen7.info->mocs << 16;
1363 if (is_hsw(sna))
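
gen7_bind_bo allocates surface state by decrementing kgem.surface, so command dwords grow up from nbatch while surface state grows down from the top of the same batch buffer, and the relocation for the bo lands in ss[1]. A toy sketch of that two-ended allocation; the buffer size and helper name are made up, only the surface/nbatch bookkeeping is the point:

/* Toy sketch of the two-ended batch allocation (assumptions: made-up sizes
 * and helper name; kgem.surface starts at the top of the batch and counts
 * down while nbatch counts up, and gen7 RENDER_SURFACE_STATE is 8 dwords). */
#include <stdint.h>
#include <assert.h>

#define BATCH_DWORDS 4096
#define SURFACE_STATE_DWORDS 8

struct kgem {
	uint32_t batch[BATCH_DWORDS];
	int nbatch;        /* next free dword at the bottom: commands */
	int surface;       /* lowest used dword at the top: surface state */
};

static uint32_t *alloc_surface_state(struct kgem *kgem, uint16_t *offset)
{
	kgem->surface -= SURFACE_STATE_DWORDS;     /* carve 8 dwords off the top */
	assert(kgem->surface >= kgem->nbatch);     /* must not collide with commands */

	*offset = (uint16_t)kgem->surface;         /* offset recorded in the binding table */
	return kgem->batch + kgem->surface;        /* caller fills ss[0..7], ss[1] = reloc */
}
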
1376 static void gen7_emit_vertex_buffer(struct sna *sna,
1386 sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
1391 sna->render.vb_id |= 1 << id;
1394 static void gen7_emit_primitive(struct sna *sna)
1396 if (sna->kgem.nbatch == sna->render_state.gen7.last_primitive) {
1397 sna->render.vertex_offset = sna->kgem.nbatch - 5;
1403 sna->render.vertex_offset = sna->kgem.nbatch;
1405 OUT_BATCH(sna->render.vertex_index);
1409 sna->render.vertex_start = sna->render.vertex_index;
1411 sna->render_state.gen7.last_primitive = sna->kgem.nbatch;
1414 static bool gen7_rectangle_begin(struct sna *sna,
1420 if (sna_vertex_wait__locked(&sna->render) && sna->render.vertex_offset)
1424 if ((sna->render.vb_id & id) == 0)
1426 if (!kgem_check_batch(&sna->kgem, ndwords))
1429 if ((sna->render.vb_id & id) == 0)
1430 gen7_emit_vertex_buffer(sna, op);
1432 gen7_emit_primitive(sna);
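
gen7_emit_primitive reuses the previous 3DPRIMITIVE when nothing has been emitted since it (nbatch == last_primitive), rewinding vertex_offset to the dword that holds the vertex count so later rectangles simply extend the same draw. A toy sketch of that bookkeeping; the packet layout here is a placeholder, not the real 3DPRIMITIVE encoding:

/* Toy sketch of the "extend the previous 3DPRIMITIVE" bookkeeping (the packet
 * layout is a placeholder; only the last_primitive/vertex_offset logic follows
 * the hits above). */
#include <stdint.h>

struct sna {
	struct { uint32_t batch[4096]; int nbatch; } kgem;
	struct { struct { int last_primitive; } gen7; } render_state;
	struct { int vertex_offset, vertex_index, vertex_start; } render;
};

static void gen7_emit_primitive_sketch(struct sna *sna)
{
	if (sna->kgem.nbatch == sna->render_state.gen7.last_primitive) {
		/* Nothing emitted since the last 3DPRIMITIVE: point vertex_offset back
		 * at its vertex-count dword and keep appending vertices to that draw. */
		sna->render.vertex_offset = sna->kgem.nbatch - 5;
		return;
	}

	/* ...emit a new 3DPRIMITIVE header...; vertex_offset marks the dword that
	 * is patched with the final vertex count when the vertices are flushed */
	sna->render.vertex_offset = sna->kgem.nbatch;
	sna->kgem.batch[sna->kgem.nbatch++] = 0;                                   /* placeholder: count */
	sna->kgem.batch[sna->kgem.nbatch++] = (uint32_t)sna->render.vertex_index;  /* start index */
	sna->render.vertex_start = sna->render.vertex_index;

	sna->render_state.gen7.last_primitive = sna->kgem.nbatch;
}
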
1436 static int gen7_get_rectangles__flush(struct sna *sna,
1440 if (sna_vertex_wait__locked(&sna->render)) {
1441 int rem = vertex_space(sna);
1446 if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 6))
1448 if (!kgem_check_reloc_and_exec(&sna->kgem, 2))
1451 if (sna->render.vertex_offset) {
1452 gen4_vertex_flush(sna);
1453 if (gen7_magic_ca_pass(sna, op)) {
1454 gen7_emit_pipe_stall(sna);
1455 gen7_emit_cc(sna, GEN7_BLEND(op->u.gen7.flags));
1456 gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
1460 return gen4_vertex_finish(sna);
1463 inline static int gen7_get_rectangles(struct sna *sna,
1466 void (*emit_state)(struct sna *sna, const struct sna_composite_op *op))
1473 rem = vertex_space(sna);
1477 rem = gen7_get_rectangles__flush(sna, op);
1482 if (unlikely(sna->render.vertex_offset == 0)) {
1483 if (!gen7_rectangle_begin(sna, op))
1489 assert(rem <= vertex_space(sna));
1495 sna->render.vertex_index += 3*want;
1499 if (sna->render.vertex_offset) {
1500 gen4_vertex_flush(sna);
1501 gen7_magic_ca_pass(sna, op);
1503 sna_vertex_wait__locked(&sna->render);
1504 _kgem_submit(&sna->kgem);
1505 emit_state(sna, op);
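
gen7_get_rectangles clamps the request to the vertex space actually available and falls back to the flush path (flush vertices, run the CA pass if needed, submit and re-emit state through the emit_state callback) when it cannot fit even one rectangle. A simplified, self-contained sketch of the clamping; the flush path itself is reduced to a comment:

/* Self-contained sketch of the clamping in gen7_get_rectangles (assumptions:
 * trimmed structs; the flush/submit path is reduced to a comment). */
struct sna_composite_op { int floats_per_rect; };
struct sna { struct { int vertex_size, vertex_used, vertex_index; } render; };

static int vertex_space(const struct sna *sna)
{
	return sna->render.vertex_size - sna->render.vertex_used;
}

static int gen7_get_rectangles_sketch(struct sna *sna,
				      const struct sna_composite_op *op,
				      int want)
{
	int rem = vertex_space(sna);

	if (rem < op->floats_per_rect)
		return 0;      /* real code: flush vertices, maybe run the CA pass,
				  submit the batch, re-emit state and retry */

	if (want * op->floats_per_rect > rem)
		want = rem / op->floats_per_rect;      /* emit only as many as fit */

	sna->render.vertex_index += 3 * want;          /* 3 vertices per rectangle */
	return want;                                   /* caller writes the vertices and
							  advances vertex_used */
}
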
1509 inline static uint32_t *gen7_composite_get_binding_table(struct sna *sna,
1514 sna->kgem.surface -=
1517 table = memset(sna->kgem.batch + sna->kgem.surface,
1520 DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));
1522 *offset = sna->kgem.surface;
1527 gen7_get_batch(struct sna *sna, const struct sna_composite_op *op)
1529 kgem_set_mode(&sna->kgem, KGEM_RENDER, op->dst.bo);
1531 if (!kgem_check_batch_with_surfaces(&sna->kgem, 150, 4)) {
1533 __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
1535 _kgem_submit(&sna->kgem);
1536 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
1539 assert(sna->kgem.mode == KGEM_RENDER);
1540 assert(sna->kgem.ring == KGEM_RENDER);
1542 if (sna->render_state.gen7.needs_invariant)
1543 gen7_emit_invariant(sna);
1546 static void gen7_emit_composite_state(struct sna *sna,
1552 gen7_get_batch(sna, op);
1554 binding_table = gen7_composite_get_binding_table(sna, &offset);
1559 gen7_bind_bo(sna,
1564 gen7_bind_bo(sna,
1570 gen7_bind_bo(sna,
1578 if (sna->kgem.surface == offset &&
1579 *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table &&
1581 sna->kgem.batch[sna->render_state.gen7.surface_table+2] == binding_table[2])) {
1582 sna->kgem.surface += sizeof(struct gen7_surface_state) / sizeof(uint32_t);
1583 offset = sna->render_state.gen7.surface_table;
1586 if (sna->kgem.batch[sna->render_state.gen7.surface_table] == binding_table[0])
1589 gen7_emit_state(sna, op, offset | dirty);
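
The comparison above is a small dedup: if the binding table just written at the top of the batch is identical to the previous one, the allocation is handed back and the old surface_table offset reused. A toy sketch of that check; in the real code the block given back is one gen7_surface_state worth of dwords and the compare is over the first entries, here it is just a plain entry count:

/* Toy sketch of the binding-table reuse check (assumptions: toy batch and a
 * plain entry count; the real code hands back a gen7_surface_state-sized
 * block and compares the first one or two 64-bit entries). */
#include <stdint.h>
#include <string.h>

struct kgem { uint32_t batch[4096]; int surface; };

static uint16_t reuse_binding_table(struct kgem *kgem,
				    uint16_t offset,        /* table just written at kgem->surface */
				    uint16_t prev_table,    /* table used by the previous op */
				    int entries)
{
	if (kgem->surface == (int)offset &&
	    memcmp(kgem->batch + prev_table, kgem->batch + offset,
		   entries * sizeof(uint32_t)) == 0) {
		kgem->surface += entries;       /* give the new allocation back */
		return prev_table;              /* and keep pointing at the old table */
	}
	return offset;
}
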
1593 gen7_align_vertex(struct sna *sna, const struct sna_composite_op *op)
1595 if (op->floats_per_vertex != sna->render_state.gen7.floats_per_vertex) {
1597 sna->render_state.gen7.floats_per_vertex, op->floats_per_vertex));
1598 gen4_vertex_align(sna, op);
1599 sna->render_state.gen7.floats_per_vertex = op->floats_per_vertex;
1604 gen7_render_composite_blt(struct sna *sna,
1608 gen7_get_rectangles(sna, op, 1, gen7_emit_composite_state);
1609 op->prim_emit(sna, op, r);
1613 gen7_render_composite_box(struct sna *sna,
1619 gen7_get_rectangles(sna, op, 1, gen7_emit_composite_state);
1631 op->prim_emit(sna, op, &r);
1635 gen7_render_composite_boxes__blt(struct sna *sna,
1644 nbox_this_time = gen7_get_rectangles(sna, op, nbox,
1661 op->prim_emit(sna, op, &r);
1668 gen7_render_composite_boxes(struct sna *sna,
1678 nbox_this_time = gen7_get_rectangles(sna, op, nbox,
1683 v = sna->render.vertices + sna->render.vertex_used;
1684 sna->render.vertex_used += nbox_this_time * op->floats_per_rect;
1692 gen7_render_composite_boxes__thread(struct sna *sna,
1698 sna_vertex_lock(&sna->render);
1703 nbox_this_time = gen7_get_rectangles(sna, op, nbox,
1708 v = sna->render.vertices + sna->render.vertex_used;
1709 sna->render.vertex_used += nbox_this_time * op->floats_per_rect;
1711 sna_vertex_acquire__locked(&sna->render);
1712 sna_vertex_unlock(&sna->render);
1717 sna_vertex_lock(&sna->render);
1718 sna_vertex_release__locked(&sna->render);
1720 sna_vertex_unlock(&sna->render);
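
The __thread variants reserve vertex space under sna_vertex_lock, bump vertex_used while holding it, then drop the lock so the boxes can be written out by the calling thread without blocking other emitters. A sketch of that locked-allocate, unlocked-fill pattern, with a pthread mutex standing in for the real sna_vertex_lock/unlock primitives and the acquire/release refcount reduced to a comment:

/* Sketch of the locked-allocate, unlocked-fill pattern (assumption: a pthread
 * mutex stands in for sna_vertex_lock/unlock; the acquire/release refcount
 * that keeps the vbo alive while the thread writes is reduced to a comment). */
#include <pthread.h>

struct sna_render {
	pthread_mutex_t lock;       /* sna_vertex_lock / sna_vertex_unlock */
	int vertex_used;
	float vertices[4096];
};

static float *reserve_rect_vertices(struct sna_render *r, int nrect, int floats_per_rect)
{
	float *v;

	pthread_mutex_lock(&r->lock);
	v = r->vertices + r->vertex_used;          /* claim space while locked */
	r->vertex_used += nrect * floats_per_rect;
	/* real code: sna_vertex_acquire__locked() here, released once written */
	pthread_mutex_unlock(&r->lock);

	return v;         /* the caller fills these floats without holding the lock */
}
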
1759 static uint32_t gen7_bind_video_source(struct sna *sna,
1769 bind = sna->kgem.surface -=
1774 ss = sna->kgem.batch + bind;
1777 ss[1] = kgem_add_reloc(&sna->kgem, bind + 1, bo,
1787 if (is_hsw(sna))
1797 static void gen7_emit_video_state(struct sna *sna,
1810 gen7_get_batch(sna, op);
1850 binding_table = gen7_composite_get_binding_table(sna, &offset);
1855 gen7_bind_bo(sna,
1861 gen7_bind_video_source(sna,
1870 gen7_emit_state(sna, op, offset | dirty);
1901 gen7_render_video(struct sna *sna,
1956 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp.dst.bo);
1957 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
1958 kgem_submit(&sna->kgem);
1959 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
1962 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
1965 gen7_align_vertex(sna, &tmp);
1966 gen7_emit_video_state(sna, &tmp);
1998 gen7_get_rectangles(sna, &tmp, 1, gen7_emit_video_state);
2014 gen4_vertex_flush(sna);
2023 gen7_composite_picture(struct sna *sna,
2042 return gen4_channel_init_solid(sna, channel, color);
2048 return gen4_channel_init_linear(sna, picture, channel,
2056 ret = sna_render_picture_approximate_gradient(sna, picture, channel,
2059 ret = sna_render_picture_fixup(sna, picture, channel,
2066 return sna_render_picture_fixup(sna, picture, channel,
2071 return sna_render_picture_fixup(sna, picture, channel,
2075 return sna_render_picture_fixup(sna, picture, channel,
2105 return gen4_channel_init_solid(sna, channel,
2115 return sna_render_picture_convert(sna, picture, channel, pixmap,
2122 return sna_render_picture_extract(sna, picture, channel,
2145 return sna_render_pixmap_bo(sna, channel, pixmap,
2158 static void gen7_render_composite_done(struct sna *sna,
2161 if (sna->render.vertex_offset) {
2162 gen4_vertex_flush(sna);
2163 gen7_magic_ca_pass(sna, op);
2167 kgem_bo_destroy(&sna->kgem, op->mask.bo);
2169 kgem_bo_destroy(&sna->kgem, op->src.bo);
2171 sna_render_composite_redirect_done(sna, op);
2175 gen7_composite_set_target(struct sna *sna,
2201 if (!need_tiling(sna, op->dst.width, op->dst.height))
2215 kgem_bo_pair_undo(&sna->kgem, priv->gpu_bo, priv->cpu_bo);
2232 !sna_render_composite_redirect(sna, op, x, y, w, h, partial))
2239 try_blt(struct sna *sna,
2253 if (sna->kgem.mode == KGEM_BLT) {
2282 (sna->render_state.gt < 3 || width*height < 1024) &&
2283 can_switch_to_blt(sna, bo, 0))
2286 if (sna_picture_is_solid(src, NULL) && can_switch_to_blt(sna, bo, 0))
2294 if (prefer_blt_bo(sna, s, bo))
2298 if (sna->kgem.ring == KGEM_BLT) {
2306 return sna_blt_composite(sna, op,
2376 gen7_composite_fallback(struct sna *sna,
2467 reuse_source(struct sna *sna,
2484 return gen4_channel_init_solid(sna, mc, color);
2522 gen7_render_composite(struct sna *sna,
2538 width, height, sna->kgem.mode, sna->kgem.ring));
2541 try_blt(sna, op,
2550 if (gen7_composite_fallback(sna, src, mask, dst))
2553 if (need_tiling(sna, width, height))
2561 if (op == PictOpClear && src == sna->clear)
2564 if (!gen7_composite_set_target(sna, tmp, dst,
2569 switch (gen7_composite_picture(sna, src, &tmp->src,
2577 if (!gen4_channel_init_solid(sna, &tmp->src, 0))
2583 prefer_blt_composite(sna, tmp) &&
2584 sna_blt_composite__convert(sna,
2619 if (!reuse_source(sna,
2622 switch (gen7_composite_picture(sna, mask, &tmp->mask,
2630 if (!gen4_channel_init_solid(sna, &tmp->mask, 0))
2654 gen4_choose_composite_emitter(sna, tmp));
2665 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->dst.bo);
2666 if (!kgem_check_bo(&sna->kgem,
2669 kgem_submit(&sna->kgem);
2670 if (!kgem_check_bo(&sna->kgem,
2674 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
2677 gen7_align_vertex(sna, tmp);
2678 gen7_emit_composite_state(sna, tmp);
2683 kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
2688 kgem_bo_destroy(&sna->kgem, tmp->src.bo);
2693 kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
2698 sna_blt_composite(sna, op,
2708 gen7_render_composite_spans_box(struct sna *sna,
2721 gen7_get_rectangles(sna, &op->base, 1, gen7_emit_composite_state);
2722 op->prim_emit(sna, op, box, opacity);
2726 gen7_render_composite_spans_boxes(struct sna *sna,
2740 nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox,
2750 op->prim_emit(sna, op, box++, opacity);
2756 gen7_render_composite_spans_boxes__thread(struct sna *sna,
2766 sna_vertex_lock(&sna->render);
2771 nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox,
2776 v = sna->render.vertices + sna->render.vertex_used;
2777 sna->render.vertex_used += nbox_this_time * op->base.floats_per_rect;
2779 sna_vertex_acquire__locked(&sna->render);
2780 sna_vertex_unlock(&sna->render);
2785 sna_vertex_lock(&sna->render);
2786 sna_vertex_release__locked(&sna->render);
2788 sna_vertex_unlock(&sna->render);
2792 gen7_render_composite_spans_done(struct sna *sna,
2795 if (sna->render.vertex_offset)
2796 gen4_vertex_flush(sna);
2801 kgem_bo_destroy(&sna->kgem, op->base.src.bo);
2803 sna_render_composite_redirect_done(sna, &op->base);
2807 gen7_check_composite_spans(struct sna *sna,
2814 if (gen7_composite_fallback(sna, src, NULL, dst))
2817 if (need_tiling(sna, width, height) &&
2818 !is_gpu(sna, dst->pDrawable, PREFER_GPU_SPANS)) {
2828 gen7_render_composite_spans(struct sna *sna,
2839 width, height, flags, sna->kgem.mode, sna->kgem.ring));
2841 assert(gen7_check_composite_spans(sna, op, src, dst, width, height, flags));
2843 if (need_tiling(sna, width, height)) {
2852 if (!gen7_composite_set_target(sna, &tmp->base, dst,
2856 switch (gen7_composite_picture(sna, src, &tmp->base.src,
2864 if (!gen4_channel_init_solid(sna, &tmp->base.src, 0))
2883 gen4_choose_spans_emitter(sna, tmp));
2891 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->base.dst.bo);
2892 if (!kgem_check_bo(&sna->kgem,
2895 kgem_submit(&sna->kgem);
2896 if (!kgem_check_bo(&sna->kgem,
2900 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
2903 gen7_align_vertex(sna, &tmp->base);
2904 gen7_emit_composite_state(sna, &tmp->base);
2909 kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
2912 kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
2918 gen7_emit_copy_state(struct sna *sna,
2924 gen7_get_batch(sna, op);
2926 binding_table = gen7_composite_get_binding_table(sna, &offset);
2931 gen7_bind_bo(sna,
2936 gen7_bind_bo(sna,
2941 if (sna->kgem.surface == offset &&
2942 *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table) {
2943 sna->kgem.surface += sizeof(struct gen7_surface_state) / sizeof(uint32_t);
2944 offset = sna->render_state.gen7.surface_table;
2947 if (sna->kgem.batch[sna->render_state.gen7.surface_table] == binding_table[0])
2951 gen7_emit_state(sna, op, offset | dirty);
2955 prefer_blt_copy(struct sna *sna,
2960 if (sna->kgem.mode == KGEM_BLT)
2969 if (flags & COPY_DRI && !sna->kgem.has_semaphores)
2972 if (force_blt_ring(sna, dst_bo))
2976 (sna->render_state.gt < 3 && src_bo == dst_bo)) &&
2977 can_switch_to_blt(sna, dst_bo, flags))
2985 sna->render_state.gt < 3 &&
2986 can_switch_to_blt(sna, dst_bo, flags))
2989 if (prefer_render_ring(sna, dst_bo))
2992 if (!prefer_blt_ring(sna, dst_bo, flags))
2995 return prefer_blt_bo(sna, src_bo, dst_bo);
2999 gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
3010 overlaps(sna,
3015 if (prefer_blt_copy(sna, src_bo, dst_bo, flags) &&
3017 sna_blt_copy_boxes(sna, alu,
3030 return sna_blt_copy_boxes_fallback(sna, alu,
3036 if (overlaps(sna,
3043 if ((big || !prefer_render_ring(sna, dst_bo)) &&
3044 sna_blt_copy_boxes(sna, alu,
3058 return sna_render_copy_boxes__overlap(sna, alu, dst, dst_bo,
3098 if (!sna_render_composite_redirect(sna, &tmp,
3124 if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
3145 kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp.dst.bo);
3146 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
3147 kgem_submit(&sna->kgem);
3148 if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
3150 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3152 kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
3155 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3166 gen7_align_vertex(sna, &tmp);
3167 gen7_emit_copy_state(sna, &tmp);
3173 n_this_time = gen7_get_rectangles(sna, &tmp, n,
3177 v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
3178 sna->render.vertex_used += 6 * n_this_time;
3179 assert(sna->render.vertex_used <= sna->render.vertex_size);
3198 gen4_vertex_flush(sna);
3199 sna_render_composite_redirect_done(sna, &tmp);
3201 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3206 kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
3210 sna_blt_copy_boxes(sna, alu,
3217 return sna_tiling_copy_boxes(sna, alu,
3224 gen7_render_copy_blt(struct sna *sna,
3232 gen7_get_rectangles(sna, &op->base, 1, gen7_emit_copy_state);
3234 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3235 sna->render.vertex_used += 6;
3236 assert(sna->render.vertex_used <= sna->render.vertex_size);
3247 gen7_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
3249 if (sna->render.vertex_offset)
3250 gen4_vertex_flush(sna);
3254 gen7_render_copy(struct sna *sna, uint8_t alu,
3264 if (prefer_blt_copy(sna, src_bo, dst_bo, 0) &&
3266 sna_blt_copy(sna, alu,
3279 return sna_blt_copy(sna, alu, src_bo, dst_bo,
3312 kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
3313 if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
3314 kgem_submit(&sna->kgem);
3315 if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
3317 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3320 gen7_align_vertex(sna, &op->base);
3321 gen7_emit_copy_state(sna, &op->base);
3329 gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
3341 gen7_get_batch(sna, op);
3343 binding_table = gen7_composite_get_binding_table(sna, &offset);
3348 gen7_bind_bo(sna,
3353 gen7_bind_bo(sna,
3358 if (sna->kgem.surface == offset &&
3359 *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table) {
3360 sna->kgem.surface +=
3362 offset = sna->render_state.gen7.surface_table;
3365 if (sna->kgem.batch[sna->render_state.gen7.surface_table] == binding_table[0])
3368 gen7_emit_state(sna, op, offset | dirty);
3372 gen7_render_fill_boxes(struct sna *sna,
3392 if (prefer_blt_fill(sna, dst_bo, FILL_BOXES) ||
3410 sna_blt_fill_boxes(sna, alu,
3447 if (!sna_render_composite_redirect(sna, &tmp,
3452 return sna_tiling_fill_boxes(sna, op, format, color,
3456 tmp.src.bo = sna_render_get_solid(sna, pixel);
3465 kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
3466 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3467 kgem_submit(&sna->kgem);
3468 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3469 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3471 kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
3475 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3478 gen7_align_vertex(sna, &tmp);
3479 gen7_emit_fill_state(sna, &tmp);
3485 n_this_time = gen7_get_rectangles(sna, &tmp, n,
3489 v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
3490 sna->render.vertex_used += 6 * n_this_time;
3491 assert(sna->render.vertex_used <= sna->render.vertex_size);
3506 gen4_vertex_flush(sna);
3507 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3508 sna_render_composite_redirect_done(sna, &tmp);
3513 gen7_render_fill_op_blt(struct sna *sna,
3521 gen7_get_rectangles(sna, &op->base, 1, gen7_emit_fill_state);
3523 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3524 sna->render.vertex_used += 6;
3525 assert(sna->render.vertex_used <= sna->render.vertex_size);
3537 gen7_render_fill_op_box(struct sna *sna,
3546 gen7_get_rectangles(sna, &op->base, 1, gen7_emit_fill_state);
3548 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3549 sna->render.vertex_used += 6;
3550 assert(sna->render.vertex_used <= sna->render.vertex_size);
3562 gen7_render_fill_op_boxes(struct sna *sna,
3574 nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox,
3578 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3579 sna->render.vertex_used += 6 * nbox_this_time;
3580 assert(sna->render.vertex_used <= sna->render.vertex_size);
3595 gen7_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
3597 if (sna->render.vertex_offset)
3598 gen4_vertex_flush(sna);
3599 kgem_bo_destroy(&sna->kgem, op->base.src.bo);
3603 gen7_render_fill(struct sna *sna, uint8_t alu,
3610 if (prefer_blt_fill(sna, dst_bo, flags) &&
3611 sna_blt_fill(sna, alu,
3619 return sna_blt_fill(sna, alu,
3635 sna_render_get_solid(sna,
3646 kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
3647 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3648 kgem_submit(&sna->kgem);
3649 if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
3650 kgem_bo_destroy(&sna->kgem, op->base.src.bo);
3654 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3657 gen7_align_vertex(sna, &op->base);
3658 gen7_emit_fill_state(sna, &op->base);
3669 gen7_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
3681 return sna_blt_fill_boxes(sna, alu,
3687 gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
3697 if (prefer_blt_fill(sna, bo, FILL_BOXES) &&
3698 gen7_render_fill_one_try_blt(sna, dst, bo, color,
3705 return gen7_render_fill_one_try_blt(sna, dst, bo, color,
3719 sna_render_get_solid(sna,
3730 kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
3731 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3732 kgem_submit(&sna->kgem);
3733 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3734 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3737 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3740 gen7_align_vertex(sna, &tmp);
3741 gen7_emit_fill_state(sna, &tmp);
3743 gen7_get_rectangles(sna, &tmp, 1, gen7_emit_fill_state);
3747 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3748 sna->render.vertex_used += 6;
3749 assert(sna->render.vertex_used <= sna->render.vertex_size);
3758 gen4_vertex_flush(sna);
3759 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3765 gen7_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
3774 return sna_blt_fill_boxes(sna, GXclear,
3780 gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
3791 if (sna->kgem.mode == KGEM_BLT &&
3792 gen7_render_clear_try_blt(sna, dst, bo))
3797 return gen7_render_clear_try_blt(sna, dst, bo);
3806 tmp.src.bo = sna_render_get_solid(sna, 0);
3815 kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
3816 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3817 kgem_submit(&sna->kgem);
3818 if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
3819 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3822 _kgem_set_mode(&sna->kgem, KGEM_RENDER);
3825 gen7_align_vertex(sna, &tmp);
3826 gen7_emit_fill_state(sna, &tmp);
3828 gen7_get_rectangles(sna, &tmp, 1, gen7_emit_fill_state);
3830 v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
3831 sna->render.vertex_used += 6;
3832 assert(sna->render.vertex_used <= sna->render.vertex_size);
3842 gen4_vertex_flush(sna);
3843 kgem_bo_destroy(&sna->kgem, tmp.src.bo);
3847 static void gen7_render_reset(struct sna *sna)
3849 sna->render_state.gen7.pipe_controls_since_stall = 0;
3850 sna->render_state.gen7.emit_flush = false;
3851 sna->render_state.gen7.needs_invariant = true;
3852 sna->render_state.gen7.ve_id = 3 << 2;
3853 sna->render_state.gen7.last_primitive = -1;
3855 sna->render_state.gen7.num_sf_outputs = 0;
3856 sna->render_state.gen7.samplers = -1;
3857 sna->render_state.gen7.blend = -1;
3858 sna->render_state.gen7.kernel = -1;
3859 sna->render_state.gen7.drawrect_offset = -1;
3860 sna->render_state.gen7.drawrect_limit = -1;
3861 sna->render_state.gen7.surface_table = 0;
3863 if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
3865 discard_vbo(sna);
3868 sna->render.vertex_offset = 0;
3869 sna->render.nvertex_reloc = 0;
3870 sna->render.vb_id = 0;
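
gen7_render_reset re-arms needs_invariant and poisons every cached hardware value with a sentinel so the first emission into a fresh batch can never be skipped by the caching checks above. A minimal sketch of the sentinel reset, with a trimmed state struct:

/* Minimal sketch of the sentinel reset (assumption: trimmed state struct). */
#include <stdint.h>

struct gen7_render_state {
	uint32_t samplers, blend, kernel;
	uint32_t drawrect_offset, drawrect_limit;
	uint16_t surface_table;
	int num_sf_outputs;
	int last_primitive;
};

static void reset_cached_state(struct gen7_render_state *s)
{
	/* -1 never matches a real offset/handle, so the first emit always happens */
	s->samplers = s->blend = s->kernel = (uint32_t)-1;
	s->drawrect_offset = s->drawrect_limit = (uint32_t)-1;
	s->num_sf_outputs = 0;
	s->surface_table = 0;
	s->last_primitive = -1;
}
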
3873 static void gen7_render_fini(struct sna *sna)
3875 kgem_bo_destroy(&sna->kgem, sna->render_state.gen7.general_bo);
3878 static bool is_gt3(struct sna *sna, int devid)
3880 assert(sna->kgem.gen == 075);
3884 static bool is_gt2(struct sna *sna, int devid)
3886 return devid & (is_hsw(sna)? 0x30 : 0x20);
3889 static bool is_mobile(struct sna *sna, int devid)
3894 static bool gen7_render_setup(struct sna *sna, int devid)
3896 struct gen7_render_state *state = &sna->render_state.gen7;
3901 if (is_ivb(sna)) {
3905 if (is_gt2(sna, devid))
3908 } else if (is_byt(sna)) {
3910 } else if (is_hsw(sna)) {
3913 if (is_gt3(sna, devid))
3915 else if (is_gt2(sna, devid))
3942 sna_static_stream_compile_wm(sna, &general,
3948 sna_static_stream_compile_wm(sna, &general,
3954 sna_static_stream_compile_wm(sna, &general,
3983 state->general_bo = sna_static_stream_fini(sna, &general);
3987 const char *gen7_render_init(struct sna *sna, const char *backend)
3989 int devid = intel_get_device_id(sna->dev);
3991 if (!gen7_render_setup(sna, devid))
3994 sna->kgem.context_switch = gen6_render_context_switch;
3995 sna->kgem.retire = gen6_render_retire;
3996 sna->kgem.expire = gen4_render_expire;
3999 sna->render.composite = gen7_render_composite;
4000 sna->render.prefer_gpu |= PREFER_GPU_RENDER;
4003 sna->render.check_composite_spans = gen7_check_composite_spans;
4004 sna->render.composite_spans = gen7_render_composite_spans;
4005 if (is_mobile(sna, devid) || is_gt2(sna, devid) || is_byt(sna))
4006 sna->render.prefer_gpu |= PREFER_GPU_SPANS;
4008 sna->render.video = gen7_render_video;
4011 sna->render.copy_boxes = gen7_render_copy_boxes;
4014 sna->render.copy = gen7_render_copy;
4018 sna->render.fill_boxes = gen7_render_fill_boxes;
4021 sna->render.fill = gen7_render_fill;
4024 sna->render.fill_one = gen7_render_fill_one;
4027 sna->render.clear = gen7_render_clear;
4030 sna->render.flush = gen4_render_flush;
4031 sna->render.reset = gen7_render_reset;
4032 sna->render.fini = gen7_render_fini;
4034 sna->render.max_3d_size = GEN7_MAX_SIZE;
4035 sna->render.max_3d_pitch = 1 << 18;
4036 return sna->render_state.gen7.info->name;
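
gen7_render_init wires the gen7 entry points into the generic sna->render function table (sharing the gen4/gen6 helpers for flush, retire and expire), sets the size limits and returns the chipset info name. A heavily trimmed sketch of that vtable wiring; the ops struct, stub functions and returned string are illustrative, only max_3d_pitch comes straight from the listing and 16384 stands in for GEN7_MAX_SIZE:

/* Heavily trimmed sketch of the vtable wiring (assumptions: the ops struct,
 * stub functions and returned string are illustrative; max_3d_pitch is from
 * the listing and 16384 stands in for GEN7_MAX_SIZE). */
struct sna;

struct sna_render_ops {
	int  (*composite)(struct sna *sna);
	void (*reset)(struct sna *sna);
	void (*fini)(struct sna *sna);
	int  max_3d_size;
	int  max_3d_pitch;
};

static int  gen7_composite_stub(struct sna *sna) { (void)sna; return 0; }
static void gen7_reset_stub(struct sna *sna)     { (void)sna; }
static void gen7_fini_stub(struct sna *sna)      { (void)sna; }

static const char *gen7_render_init_sketch(struct sna_render_ops *ops)
{
	ops->composite    = gen7_composite_stub;   /* gen7_render_composite, ... */
	ops->reset        = gen7_reset_stub;       /* gen7_render_reset */
	ops->fini         = gen7_fini_stub;        /* gen7_render_fini */
	ops->max_3d_size  = 16384;                 /* stand-in for GEN7_MAX_SIZE */
	ops->max_3d_pitch = 1 << 18;               /* as in the listing */
	return "gen7 (sketch)";                    /* real code returns info->name */
}
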