vc4_draw.c revision b8e80941
/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_blitter.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_upload_mgr.h"
#include "indices/u_primconvert.h"

#include "vc4_context.h"
#include "vc4_resource.h"

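/* Maximum number of draw calls we let one job queue up before flushing, to
 * stay clear of the HW-2116 state counter wraparound described above
 * vc4_hw_2116_workaround() below.
 */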
#define VC4_HW_2116_COUNT		0x1ef0

static void
vc4_get_draw_cl_space(struct vc4_job *job, int vert_count)
{
        /* The SW-5891 workaround may cause us to emit multiple shader recs
         * and draw packets.
         */
        int num_draws = DIV_ROUND_UP(vert_count, 65535 - 2) + 1;

        /* Binner gets our packet state -- vc4_emit.c contents,
         * and the primitive itself.
         */
        cl_ensure_space(&job->bcl,
                        256 + (VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE +
                               VC4_PACKET_GL_SHADER_STATE_SIZE) * num_draws);

        /* Nothing for rcl -- that's covered by vc4_context.c */

        /* shader_rec gets up to 12 dwords of reloc handles plus a maximally
         * sized shader_rec (104 bytes base for 8 vattrs plus 32 bytes of
         * vattr stride).
         */
        cl_ensure_space(&job->shader_rec,
                        (12 * sizeof(uint32_t) + 104 + 8 * 32) * num_draws);

        /* Uniforms are covered by vc4_write_uniforms(). */

        /* There could be up to 16 textures per stage, plus misc other
         * pointers.
         */
        cl_ensure_space(&job->bo_handles, (2 * 16 + 20) * sizeof(uint32_t));
        cl_ensure_space(&job->bo_pointers,
                        (2 * 16 + 20) * sizeof(struct vc4_bo *));
}

/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        struct vc4_job *job = vc4->job;

        if (job->needs_flush)
                return;

        vc4_get_draw_cl_space(job, 0);

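        /* Configure the binner for this job's tile grid dimensions and MSAA
         * mode; this is the first packet in the job's binning command list.
         */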
        cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION, bin) {
                bin.width_in_tiles = job->draw_tiles_x;
                bin.height_in_tiles = job->draw_tiles_y;
                bin.multisample_mode_4x = job->msaa;
        }

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_emit(&job->bcl, START_TILE_BINNING, start);

        /* Reset the current compressed primitives format.  This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_emit(&job->bcl, PRIMITIVE_LIST_FORMAT, list) {
                list.data_type = _16_BIT_INDEX;
                list.primitive_type = TRIANGLES_LIST;
        }

        job->needs_flush = true;
        job->draw_width = vc4->framebuffer.width;
        job->draw_height = vc4->framebuffer.height;
}

static void
vc4_predraw_check_textures(struct pipe_context *pctx,
                           struct vc4_texture_stateobj *stage_tex)
{
        struct vc4_context *vc4 = vc4_context(pctx);

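        /* For each bound texture, refresh any shadow baselevel copy and flush
         * any job still writing to the resource before we sample from it.
         */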
        for (int i = 0; i < stage_tex->num_textures; i++) {
                struct vc4_sampler_view *view =
                        vc4_sampler_view(stage_tex->textures[i]);
                if (!view)
                        continue;

                if (view->texture != view->base.texture)
                        vc4_update_shadow_baselevel_texture(pctx, &view->base);

                vc4_flush_jobs_writing_resource(vc4, view->texture);
        }
}

static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info,
                         uint32_t extra_index_bias)
{
        struct vc4_job *job = vc4->job;
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);

        /* Emit the shader record. */
        cl_start_shader_reloc(&job->shader_rec, 3 + num_elements_emit);

        cl_emit(&job->shader_rec, SHADER_RECORD, rec) {
                rec.enable_clipping = true;

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_is_single_threaded =
                        !vc4->prog.fs->fs_threaded;

                /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
                rec.point_size_included_in_shaded_vertex_data =
                         (info->mode == PIPE_PRIM_POINTS &&
                          vc4->rasterizer->base.point_size_per_vertex);

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_number_of_varyings =
                        vc4->prog.fs->num_inputs;
                rec.fragment_shader_code_address =
                        cl_address(vc4->prog.fs->bo, 0);

                rec.coordinate_shader_attribute_array_select_bits =
                         vc4->prog.cs->vattrs_live;
                rec.coordinate_shader_total_attributes_size =
                         vc4->prog.cs->vattr_offsets[8];
                rec.coordinate_shader_code_address =
                        cl_address(vc4->prog.cs->bo, 0);

                rec.vertex_shader_attribute_array_select_bits =
                         vc4->prog.vs->vattrs_live;
                rec.vertex_shader_total_attributes_size =
                         vc4->prog.vs->vattr_offsets[8];
                rec.vertex_shader_code_address =
                        cl_address(vc4->prog.vs->bo, 0);
        }

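        /* Emit one attribute record per vertex element, and track the largest
         * index that stays inside every bound vertex buffer; that value is
         * handed to the hardware in the indexed primitive packet.
         */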
        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * (info->index_bias +
                                                 extra_index_bias));
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(rsc->bo, offset);
                        attr.number_of_bytes_minus_1 = elem_size - 1;
                        attr.stride = vb->stride;
                        attr.coordinate_shader_vpm_offset =
                                vc4->prog.cs->vattr_offsets[i];
                        attr.vertex_shader_vpm_offset =
                                vc4->prog.vs->vattr_offsets[i];
                }

                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

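        /* With no vertex elements bound, we still allocated one dummy
         * attribute record above, so point it at a scratch BO to give the
         * dummy read a valid address.
         */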
        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(bo, 0);
                        attr.number_of_bytes_minus_1 = 16 - 1;
                        attr.stride = 0;
                        attr.coordinate_shader_vpm_offset = 0;
                        attr.vertex_shader_vpm_offset = 0;
                }

                vc4_bo_unreference(&bo);
        }

        cl_emit(&job->bcl, GL_SHADER_STATE, shader_state) {
                /* Note that number of attributes == 0 in the packet means 8
                 * attributes.  This field also contains the offset into
                 * shader_rec.
                 */
                assert(vtx->num_elements <= 8);
                shader_state.number_of_attribute_arrays =
                        num_elements_emit & 0x7;
        }

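        /* Emit the uniform streams for all three stages.  The vertex and
         * coordinate shaders both read from the gallium vertex-stage constant
         * buffers and sampler state.
         */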
        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        vc4->last_index_bias = info->index_bias + extra_index_bias;
        vc4->max_index = max_index;
        job->shader_rec_count++;
}

/**
 * HW-2116 workaround: Flush the batch before triggering the hardware state
 * counter wraparound behavior.
 *
 * State updates are tracked by a global counter which increments at the first
 * state update after a draw or a START_BINNING.  Tiles can then have their
 * state updated at draw time with a set of cheap checks for whether the
 * state's copy of the global counter matches the global counter the last time
 * that state was written to the tile.
 *
 * The state counters are relatively small and wrap around quickly, so you
 * could get false negatives for needing to update a particular state in the
 * tile.  To avoid this, the hardware attempts to write all of the state in
 * the tile at wraparound time.  This apparently is broken, so we just flush
 * everything before that behavior is triggered.  A batch flush is sufficient
 * to get our current contents drawn and reset the counters to 0.
 *
 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
 * tiles with VC4_PACKET_RETURN_FROM_LIST.
 */
static void
vc4_hw_2116_workaround(struct pipe_context *pctx, int vert_count)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        if (job->draw_calls_queued + vert_count / 65535 >= VC4_HW_2116_COUNT) {
                perf_debug("Flushing batch due to HW-2116 workaround "
                           "(too many draw calls per scene)\n");
                vc4_job_submit(vc4, job);
        }
}

static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_draw_info local_info;

        if (!info->count_from_stream_output && !info->indirect &&
            !info->primitive_restart &&
            !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
                return;

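        /* The hardware only rasterizes points, lines, and triangles.  A
         * single non-flatshaded quad can be drawn as a triangle fan;
         * everything else at or above QUADS goes through the u_primconvert
         * fallback.
         */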
        if (info->mode >= PIPE_PRIM_QUADS) {
                if (info->mode == PIPE_PRIM_QUADS &&
                    info->count == 4 &&
                    !vc4->rasterizer->base.flatshade) {
                        local_info = *info;
                        local_info.mode = PIPE_PRIM_TRIANGLE_FAN;
                        info = &local_info;
                } else {
                        util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
                        util_primconvert_draw_vbo(vc4->primconvert, info);
                        perf_debug("Fallback conversion for %d %s vertices\n",
                                   info->count, u_prim_name(info->mode));
                        return;
                }
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx, info->count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        /* Make sure that the raster order flags haven't changed, which can
         * only be set at job granularity.
         */
        if (job->flags != vc4->rasterizer->tile_raster_order_flags) {
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        vc4_get_draw_cl_space(job, info->count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        bool needs_drawarrays_shader_state = false;

        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                if (info->index_size)
                        vc4_emit_gl_shader_state(vc4, info, 0);
                else
                        needs_drawarrays_shader_state = true;
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
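        /* Indexed draws: the hardware consumes only 8- or 16-bit indices, so
         * 32-bit index buffers are converted to a 16-bit shadow buffer and
         * user index arrays are uploaded to a BO first.
         */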
        if (info->index_size) {
                uint32_t index_size = info->index_size;
                uint32_t offset = info->start * index_size;
                struct pipe_resource *prsc;
                if (info->index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, info,
                                                           offset,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (info->has_user_indices) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              info->index.user,
                                              &offset, &prsc);
                        } else {
                                prsc = info->index.resource;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                struct vc4_cl_out *bcl = cl_start(&job->bcl);

                /* The original design for the VC4 kernel UABI had multiple
                 * packets that used relocations in the BCL (some of which
                 * needed two BOs), but later modifications eliminated all but
                 * this one usage.  We have an arbitrary 32-bit offset value,
                 * and need to also supply an arbitrary 32-bit index buffer
                 * GEM handle, so we have this fake packet we emit in our BCL
                 * to be validated, which the kernel uses at validation time
                 * to perform the relocation in the IB packet (without
                 * emitting to the actual HW).
                 */
                uint32_t hindex = vc4_gem_hindex(job, rsc->bo);
                if (job->last_gem_handle_hindex != hindex) {
                        cl_u8(&bcl, VC4_PACKET_GEM_HANDLES);
                        cl_u32(&bcl, hindex);
                        cl_u32(&bcl, 0);
                        job->last_gem_handle_hindex = hindex;
                }

                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16:
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_u32(&bcl, offset);
                cl_u32(&bcl, vc4->max_index);

                cl_end(&job->bcl, bcl);
                job->draw_calls_queued++;

                if (info->index_size == 4 || info->has_user_indices)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = info->count;
                uint32_t start = info->start;
                uint32_t extra_index_bias = 0;
                static const uint32_t max_verts = 65535;

                /* GFXH-515 / SW-5891: The binner emits 16 bit indices for
                 * drawarrays, which means that if start + count > 64k it
                 * would truncate the top bits.  Work around this by emitting
                 * a limited number of primitives at a time and reemitting the
                 * shader state pointing farther down the vertex attribute
                 * arrays.
                 *
                 * To do this properly for line loops or trifans, we'd need to
                 * make a new VB containing the first vertex plus whatever
                 * remainder.
                 */
                if (start + count > max_verts) {
                        extra_index_bias = start;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }

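                /* Emit the draw in chunks of at most max_verts vertices.
                 * "this_count" is how many vertices a chunk draws, while
                 * "step" is how far we advance through the arrays; for strip
                 * and loop primitives they differ so the shared vertices get
                 * re-emitted by the next chunk.
                 */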
                while (count) {
                        uint32_t this_count = count;
                        uint32_t step = count;

                        if (needs_drawarrays_shader_state) {
                                vc4_emit_gl_shader_state(vc4, info,
                                                         extra_index_bias);
                        }

                        if (count > max_verts) {
                                switch (info->mode) {
                                case PIPE_PRIM_POINTS:
                                        this_count = step = max_verts;
                                        break;
                                case PIPE_PRIM_LINES:
                                        this_count = step = max_verts - (max_verts % 2);
                                        break;
                                case PIPE_PRIM_LINE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        break;
                                case PIPE_PRIM_LINE_LOOP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        debug_warn_once("unhandled line loop "
                                                        "looping behavior with "
                                                        ">65535 verts\n");
                                        break;
                                case PIPE_PRIM_TRIANGLES:
                                        this_count = step = max_verts - (max_verts % 3);
                                        break;
                                case PIPE_PRIM_TRIANGLE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 2;
                                        break;
                                default:
                                        debug_warn_once("unhandled primitive "
                                                        "max vert count, truncating\n");
                                        this_count = step = max_verts;
                                }
                        }

                        cl_emit(&job->bcl, VERTEX_ARRAY_PRIMITIVES, array) {
                                array.primitive_mode = info->mode;
                                array.length = this_count;
                                array.index_of_first_vertex = start;
                        }
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }
        }

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

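        /* Mark which buffers this draw writes: with the depth or stencil test
         * enabled, the job needs to store that buffer at the end of the scene
         * and the resource's contents become defined.
         */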
        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        /* If we've used half of the presumably 256MB CMA area, flush the job
         * so that we don't accumulate a job that will end up not being
         * executable.
         */
        if (job->bo_space > 128 * 1024 * 1024)
                vc4_flush(pctx);

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}

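/* Pack a float clear color into the raw 16- or 32-bit value used for the
 * given render target format.
 */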
static uint32_t
pack_rgba(enum pipe_format format, const float *rgba)
{
        union util_color uc;
        util_pack_color(rgba, format, &uc);
        if (util_format_get_blocksize(format) == 2)
                return uc.us;
        else
                return uc.ui[0];
}

static void
vc4_clear(struct pipe_context *pctx, unsigned buffers,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);
                unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;

                /* Clearing ZS will clear both Z and stencil, so if we're
                 * trying to clear just one then we need to draw a quad to do
                 * it instead.  We need to do this before setting up
                 * tile-based clears in vc4->job, because the blitter may
                 * submit the current job.
                 */
                if ((zsclear == PIPE_CLEAR_DEPTH ||
                     zsclear == PIPE_CLEAR_STENCIL) &&
                    (rsc->initialized_buffers & ~(zsclear | job->cleared)) &&
                    util_format_is_depth_and_stencil(vc4->framebuffer.zsbuf->format)) {
                        static const union pipe_color_union dummy_color = {};

                        perf_debug("Partial clear of Z+stencil buffer, "
                                   "drawing a quad instead of fast clearing\n");
                        vc4_blitter_save(vc4);
                        util_blitter_clear(vc4->blitter,
                                           vc4->framebuffer.width,
                                           vc4->framebuffer.height,
                                           1,
                                           zsclear,
                                           &dummy_color, depth, stencil);
                        buffers &= ~zsclear;
                        if (!buffers)
                                return;
                        job = vc4_get_job_for_fbo(vc4);
                }
        }

        /* We can't flag new buffers for clearing once we've queued draws.  We
         * could avoid this by using the 3d engine to clear.
         */
        if (job->draw_calls_queued) {
                perf_debug("Flushing rendering to process new clear.\n");
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        if (buffers & PIPE_CLEAR_COLOR0) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.cbufs[0]->texture);
                uint32_t clear_color;

                if (vc4_rt_format_is_565(vc4->framebuffer.cbufs[0]->format)) {
                        /* In 565 mode, the hardware will be packing our color
                         * for us.
                         */
                        clear_color = pack_rgba(PIPE_FORMAT_R8G8B8A8_UNORM,
                                                color->f);
                } else {
                        /* Otherwise, we need to do this packing because we
                         * support multiple swizzlings of RGBA8888.
                         */
                        clear_color =
                                pack_rgba(vc4->framebuffer.cbufs[0]->format,
                                          color->f);
                }
                job->clear_color[0] = job->clear_color[1] = clear_color;
                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_COLOR0);
        }

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                /* Though the depth buffer is stored with Z in the high 24,
                 * for this field we just need to store it in the low 24.
                 */
                if (buffers & PIPE_CLEAR_DEPTH) {
                        job->clear_depth = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                                       depth);
                }
                if (buffers & PIPE_CLEAR_STENCIL)
                        job->clear_stencil = stencil;

                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_DEPTHSTENCIL);
        }

        job->draw_min_x = 0;
        job->draw_min_y = 0;
        job->draw_max_x = vc4->framebuffer.width;
        job->draw_max_y = vc4->framebuffer.height;
        job->cleared |= buffers;
        job->resolve |= buffers;

        vc4_start_draw(vc4);
}

static void
vc4_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                        const union pipe_color_union *color,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear RT\n");
}

static void
vc4_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                        unsigned buffers, double depth, unsigned stencil,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear DS\n");
}

void
vc4_draw_init(struct pipe_context *pctx)
{
        pctx->draw_vbo = vc4_draw_vbo;
        pctx->clear = vc4_clear;
        pctx->clear_render_target = vc4_clear_render_target;
        pctx->clear_depth_stencil = vc4_clear_depth_stencil;
}