/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "util/u_surface.h"
#include "util/u_blitter.h"
#include "compiler/nir/nir_builder.h"
#include "vc4_context.h"

static struct pipe_surface *
vc4_get_blit_surface(struct pipe_context *pctx,
                     struct pipe_resource *prsc, unsigned level)
{
        struct pipe_surface tmpl;

        memset(&tmpl, 0, sizeof(tmpl));
        tmpl.format = prsc->format;
        tmpl.u.tex.level = level;
        tmpl.u.tex.first_layer = 0;
        tmpl.u.tex.last_layer = 0;

        return pctx->create_surface(pctx, prsc, &tmpl);
}

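/* Returns true if size is not a multiple of tile_size (which must be a
 * power of two).
 */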
static bool
is_tile_unaligned(unsigned size, unsigned tile_size)
{
        return size & (tile_size - 1);
}

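/* Fast path: performs the blit as a job that loads each tile from the source
 * surface (attached as the job's color_read) and stores it to the destination
 * without any actual rendering.  Only unscaled, 1:1, tile-aligned color blits
 * between surfaces of the same format can take this path.
 */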
static bool
vc4_tile_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        bool msaa = (info->src.resource->nr_samples > 1 ||
                     info->dst.resource->nr_samples > 1);
        int tile_width = msaa ? 32 : 64;
        int tile_height = msaa ? 32 : 64;

        if (util_format_is_depth_or_stencil(info->dst.resource->format))
                return false;

        if (info->scissor_enable)
                return false;

        if ((info->mask & PIPE_MASK_RGBA) == 0)
                return false;

        if (info->dst.box.x != info->src.box.x ||
            info->dst.box.y != info->src.box.y ||
            info->dst.box.width != info->src.box.width ||
            info->dst.box.height != info->src.box.height) {
                return false;
        }

        int dst_surface_width = u_minify(info->dst.resource->width0,
                                         info->dst.level);
        int dst_surface_height = u_minify(info->dst.resource->height0,
                                          info->dst.level);
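        /* The RCL can only load and store whole tiles, so the blit must
         * start on a tile boundary and either cover whole tiles or run all
         * the way to the edge of the surface.
         */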
        if (is_tile_unaligned(info->dst.box.x, tile_width) ||
            is_tile_unaligned(info->dst.box.y, tile_height) ||
            (is_tile_unaligned(info->dst.box.width, tile_width) &&
             info->dst.box.x + info->dst.box.width != dst_surface_width) ||
            (is_tile_unaligned(info->dst.box.height, tile_height) &&
             info->dst.box.y + info->dst.box.height != dst_surface_height)) {
                return false;
        }

        /* VC4_PACKET_LOAD_TILE_BUFFER_GENERAL uses the
         * VC4_PACKET_TILE_RENDERING_MODE_CONFIG's width (determined by our
         * destination surface) to determine the stride.  This may be wrong
         * when reading from texture miplevels > 0, which are stored in
         * POT-sized areas.  For MSAA, the tile addresses are computed
         * explicitly by the RCL, but still use the destination width to
         * determine the stride (which could be fixed by explicitly supplying
         * it in the ABI).
         */
        struct vc4_resource *rsc = vc4_resource(info->src.resource);

        uint32_t stride;

        if (info->src.resource->nr_samples > 1)
                stride = align(dst_surface_width, 32) * 4 * rsc->cpp;
        else if (rsc->slices[info->src.level].tiling == VC4_TILING_FORMAT_T)
                stride = align(dst_surface_width * rsc->cpp, 128);
        else
                stride = align(dst_surface_width * rsc->cpp, 16);

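        /* Only take this path when the stride the hardware will assume for
         * the source matches the stride the source slice was actually laid
         * out with.
         */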
        if (stride != rsc->slices[info->src.level].stride)
                return false;

        if (info->dst.resource->format != info->src.resource->format)
                return false;

        if (false) {
                fprintf(stderr, "RCL blit from %d,%d to %d,%d (%d,%d)\n",
                        info->src.box.x,
                        info->src.box.y,
                        info->dst.box.x,
                        info->dst.box.y,
                        info->dst.box.width,
                        info->dst.box.height);
        }

        struct pipe_surface *dst_surf =
                vc4_get_blit_surface(pctx, info->dst.resource, info->dst.level);
        struct pipe_surface *src_surf =
                vc4_get_blit_surface(pctx, info->src.resource, info->src.level);

        vc4_flush_jobs_reading_resource(vc4, info->src.resource);

        struct vc4_job *job = vc4_get_job(vc4, dst_surf, NULL);
        pipe_surface_reference(&job->color_read, src_surf);

        /* If we're resolving from MSAA to single sample, we still need to run
         * the engine in MSAA mode for the load.
         */
        if (!job->msaa && info->src.resource->nr_samples > 1) {
                job->msaa = true;
                job->tile_width = 32;
                job->tile_height = 32;
        }

        job->draw_min_x = info->dst.box.x;
        job->draw_min_y = info->dst.box.y;
        job->draw_max_x = info->dst.box.x + info->dst.box.width;
        job->draw_max_y = info->dst.box.y + info->dst.box.height;
        job->draw_width = dst_surf->width;
        job->draw_height = dst_surf->height;

        job->tile_width = tile_width;
        job->tile_height = tile_height;
        job->msaa = msaa;
        job->needs_flush = true;
        job->resolve |= PIPE_CLEAR_COLOR;

        vc4_job_submit(vc4, job);

        pipe_surface_reference(&dst_surf, NULL);
        pipe_surface_reference(&src_surf, NULL);

        return true;
}

void
vc4_blitter_save(struct vc4_context *vc4)
{
        util_blitter_save_vertex_buffer_slot(vc4->blitter, vc4->vertexbuf.vb);
        util_blitter_save_vertex_elements(vc4->blitter, vc4->vtx);
        util_blitter_save_vertex_shader(vc4->blitter, vc4->prog.bind_vs);
        util_blitter_save_rasterizer(vc4->blitter, vc4->rasterizer);
        util_blitter_save_viewport(vc4->blitter, &vc4->viewport);
        util_blitter_save_scissor(vc4->blitter, &vc4->scissor);
        util_blitter_save_fragment_shader(vc4->blitter, vc4->prog.bind_fs);
        util_blitter_save_blend(vc4->blitter, vc4->blend);
        util_blitter_save_depth_stencil_alpha(vc4->blitter, vc4->zsa);
        util_blitter_save_stencil_ref(vc4->blitter, &vc4->stencil_ref);
        util_blitter_save_sample_mask(vc4->blitter, vc4->sample_mask);
        util_blitter_save_framebuffer(vc4->blitter, &vc4->framebuffer);
        util_blitter_save_fragment_sampler_states(vc4->blitter,
                        vc4->fragtex.num_samplers,
                        (void **)vc4->fragtex.samplers);
        util_blitter_save_fragment_sampler_views(vc4->blitter,
                        vc4->fragtex.num_textures, vc4->fragtex.textures);
}

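/* Builds the trivial passthrough vertex shader for the YUV linear-to-tiled
 * blit in NIR, copying the incoming position straight to gl_Position.  The
 * shader is built once and cached on the context.
 */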
static void *vc4_get_yuv_vs(struct pipe_context *pctx)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_screen *pscreen = pctx->screen;

   if (vc4->yuv_linear_blit_vs)
           return vc4->yuv_linear_blit_vs;

   const struct nir_shader_compiler_options *options =
           pscreen->get_compiler_options(pscreen,
                                         PIPE_SHADER_IR_NIR,
                                         PIPE_SHADER_VERTEX);

   nir_builder b;
   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, options);
   b.shader->info.name = ralloc_strdup(b.shader, "linear_blit_vs");

   const struct glsl_type *vec4 = glsl_vec4_type();
   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "pos");

   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "gl_Position");
   pos_out->data.location = VARYING_SLOT_POS;

   nir_store_var(&b, pos_out, nir_load_var(&b, pos_in), 0xf);

   struct pipe_shader_state shader_tmpl = {
           .type = PIPE_SHADER_IR_NIR,
           .ir.nir = b.shader,
   };

   vc4->yuv_linear_blit_vs = pctx->create_vs_state(pctx, &shader_tmpl);

   return vc4->yuv_linear_blit_vs;
}

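/* Builds the fragment shader for the YUV linear-to-tiled blit.  Instead of
 * sampling the source as a texture, it fetches 32 bits per fragment from the
 * raster-order source buffer (bound as a constant buffer) at a hand-computed
 * byte offset, so that writing the result through the tiled RGBA8888 render
 * target lands each group of source bytes where the tiled R8/R8G8 layout
 * expects them.
 */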
static void *vc4_get_yuv_fs(struct pipe_context *pctx, int cpp)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_screen *pscreen = pctx->screen;
   struct pipe_shader_state **cached_shader;
   const char *name;

   if (cpp == 1) {
           cached_shader = &vc4->yuv_linear_blit_fs_8bit;
           name = "linear_blit_8bit_fs";
   } else {
           cached_shader = &vc4->yuv_linear_blit_fs_16bit;
           name = "linear_blit_16bit_fs";
   }

   if (*cached_shader)
           return *cached_shader;

   const struct nir_shader_compiler_options *options =
           pscreen->get_compiler_options(pscreen,
                                         PIPE_SHADER_IR_NIR,
                                         PIPE_SHADER_FRAGMENT);

   nir_builder b;
   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, options);
   b.shader->info.name = ralloc_strdup(b.shader, name);

   const struct glsl_type *vec4 = glsl_vec4_type();
   const struct glsl_type *glsl_int = glsl_int_type();

   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "f_color");
   color_out->data.location = FRAG_RESULT_COLOR;

   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "pos");
   pos_in->data.location = VARYING_SLOT_POS;
   nir_ssa_def *pos = nir_load_var(&b, pos_in);

   nir_ssa_def *one = nir_imm_int(&b, 1);
   nir_ssa_def *two = nir_imm_int(&b, 2);

   nir_ssa_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
   nir_ssa_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));

   nir_variable *stride_in = nir_variable_create(b.shader, nir_var_uniform,
                                                 glsl_int, "stride");
   nir_ssa_def *stride = nir_load_var(&b, stride_in);

   nir_ssa_def *x_offset;
   nir_ssa_def *y_offset;
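   /* Compute the byte offset in the raster-order source holding the data for
    * this fragment of the half-size RGBA destination.  For 16bpp (R8G8)
    * sources each 32-bit fragment simply covers two horizontally adjacent
    * source pixels, so the offset is y * stride + 4 * x.  For 8bpp sources
    * the 4x4-pixel 32bpp utiles of the render target alias the 8x8-pixel
    * 8bpp utiles of the destination, so the four source bytes for fragment
    * (x, y) start at source coordinates (((x & ~3) << 1) + ((x & 1) << 2),
    * (y << 1) + ((x & 2) >> 1)), which is what the math below turns into a
    * byte offset.
    */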
   if (cpp == 1) {
           nir_ssa_def *intra_utile_x_offset =
                   nir_ishl(&b, nir_iand(&b, x, one), two);
           nir_ssa_def *inter_utile_x_offset =
                   nir_ishl(&b, nir_iand(&b, x, nir_imm_int(&b, ~3)), one);

           x_offset = nir_iadd(&b,
                               intra_utile_x_offset,
                               inter_utile_x_offset);
           y_offset = nir_imul(&b,
                               nir_iadd(&b,
                                        nir_ishl(&b, y, one),
                                        nir_ushr(&b, nir_iand(&b, x, two), one)),
                               stride);
   } else {
           x_offset = nir_ishl(&b, x, two);
           y_offset = nir_imul(&b, y, stride);
   }

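   /* Fetch 32 bits from constant buffer 1 (where vc4_yuv_blit binds the
    * linear source BO) at the computed byte offset.
    */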
   nir_intrinsic_instr *load =
           nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
   load->num_components = 1;
   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, 32, NULL);
   load->src[0] = nir_src_for_ssa(one);
   load->src[1] = nir_src_for_ssa(nir_iadd(&b, x_offset, y_offset));
   nir_builder_instr_insert(&b, &load->instr);

   nir_store_var(&b, color_out,
                 nir_unpack_unorm_4x8(&b, &load->dest.ssa),
                 0xf);

   struct pipe_shader_state shader_tmpl = {
           .type = PIPE_SHADER_IR_NIR,
           .ir.nir = b.shader,
   };

   *cached_shader = pctx->create_fs_state(pctx, &shader_tmpl);

   return *cached_shader;
}

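/* Copies a raster-order R8/R8G8 (YUV plane) source into the matching tiled
 * destination by drawing a full-surface quad with the custom shaders above.
 */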
static bool
vc4_yuv_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *src = vc4_resource(info->src.resource);
        struct vc4_resource *dst = vc4_resource(info->dst.resource);
        bool ok;

        if (src->tiled)
                return false;
        if (src->base.format != PIPE_FORMAT_R8_UNORM &&
            src->base.format != PIPE_FORMAT_R8G8_UNORM)
                return false;

        /* YUV blits always turn raster-order to tiled */
        assert(dst->base.format == src->base.format);
        assert(dst->tiled);

        /* Always 1:1 and at the origin */
        assert(info->src.box.x == 0 && info->dst.box.x == 0);
        assert(info->src.box.y == 0 && info->dst.box.y == 0);
        assert(info->src.box.width == info->dst.box.width);
        assert(info->src.box.height == info->dst.box.height);

        if ((src->slices[info->src.level].offset & 3) ||
            (src->slices[info->src.level].stride & 3)) {
                perf_debug("YUV-blit src texture offset/stride misaligned: 0x%08x/%d\n",
                           src->slices[info->src.level].offset,
                           src->slices[info->src.level].stride);
                goto fallback;
        }

        vc4_blitter_save(vc4);

        /* Create a renderable surface mapping the T-tiled shadow buffer.
         */
        struct pipe_surface dst_tmpl;
        util_blitter_default_dst_texture(&dst_tmpl, info->dst.resource,
                                         info->dst.level, info->dst.box.z);
        dst_tmpl.format = PIPE_FORMAT_RGBA8888_UNORM;
        struct pipe_surface *dst_surf =
                pctx->create_surface(pctx, info->dst.resource, &dst_tmpl);
        if (!dst_surf) {
                fprintf(stderr, "Failed to create YUV dst surface\n");
                util_blitter_unset_running_flag(vc4->blitter);
                return false;
        }
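
        /* Each RGBA8888 fragment writes 4 bytes while the source format is
         * only 1 or 2 bytes per pixel, so shrink the render target so its
         * total byte size still matches the destination's.
         */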
        dst_surf->width /= 2;
        if (dst->cpp == 1)
                dst_surf->height /= 2;

        /* Set the constant buffer. */
        uint32_t stride = src->slices[info->src.level].stride;
        struct pipe_constant_buffer cb_uniforms = {
                .user_buffer = &stride,
                .buffer_size = sizeof(stride),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb_uniforms);
        struct pipe_constant_buffer cb_src = {
                .buffer = info->src.resource,
                .buffer_offset = src->slices[info->src.level].offset,
                .buffer_size = (src->bo->size -
                                src->slices[info->src.level].offset),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_src);

        /* Unbind the textures, to make sure we don't try to recurse into the
         * shadow blit.
         */
        pctx->set_sampler_views(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);
        pctx->bind_sampler_states(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);

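        /* util_blitter_custom_shader() binds dst_surf as the render target
         * and draws a quad covering it with the custom VS/FS supplied here.
         */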
        util_blitter_custom_shader(vc4->blitter, dst_surf,
                                   vc4_get_yuv_vs(pctx),
                                   vc4_get_yuv_fs(pctx, src->cpp));

        util_blitter_restore_textures(vc4->blitter);
        util_blitter_restore_constant_buffer_state(vc4->blitter);
        /* Restore cb1 (util_blitter doesn't handle this one). */
        struct pipe_constant_buffer cb_disabled = { 0 };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_disabled);

        pipe_surface_reference(&dst_surf, NULL);

        return true;

fallback:
        /* Do an immediate SW fallback, since the render blit path
         * would just recurse.
         */
        ok = util_try_blit_via_copy_region(pctx, info);
        assert(ok); (void)ok;

        return true;
}

static bool
vc4_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(ctx);

        if (!util_blitter_is_blit_supported(vc4->blitter, info)) {
                fprintf(stderr, "blit unsupported %s -> %s\n",
                    util_format_short_name(info->src.resource->format),
                    util_format_short_name(info->dst.resource->format));
                return false;
        }

        /* Enable the scissor, so we get a minimal set of tiles rendered. */
        if (!info->scissor_enable) {
                info->scissor_enable = true;
                info->scissor.minx = info->dst.box.x;
                info->scissor.miny = info->dst.box.y;
                info->scissor.maxx = info->dst.box.x + info->dst.box.width;
                info->scissor.maxy = info->dst.box.y + info->dst.box.height;
        }

        vc4_blitter_save(vc4);
        util_blitter_blit(vc4->blitter, info);

        return true;
}

/* Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
void
vc4_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
        struct pipe_blit_info info = *blit_info;

        if (vc4_yuv_blit(pctx, blit_info))
                return;

        if (vc4_tile_blit(pctx, blit_info))
                return;

        if (info.mask & PIPE_MASK_S) {
                if (util_try_blit_via_copy_region(pctx, &info))
                        return;

                info.mask &= ~PIPE_MASK_S;
                fprintf(stderr, "cannot blit stencil, skipping\n");
        }

        if (vc4_render_blit(pctx, &info))
                return;

        fprintf(stderr, "Unsupported blit\n");
}