/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "main/condrender.h"
#include "util/hash_table.h"
#include "util/set.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_FBO

/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(irb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}

/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}

/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride,
                       bool flip_y)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   ptrdiff_t stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);
   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The only case where we are asked to map a multisampled RB is
    * glReadPixels() (or swrast paths like glCopyTexImage()) reading from a
    * window-system MSAA buffer, and GL expects an automatic resolve to
    * happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any time),
    * while the miptree itself is our driver-private allocation.  If it's a
    * depth or stencil miptree, we have a private MSAA buffer and no shared
    * singlesample buffer, and since we don't expect anybody to ever actually
    * resolve it, we just make a temporary singlesample buffer now when we
    * have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  1 /*num_samples*/);
         if (!irb->singlesample_mt)
            goto fail;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }
   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we ask for the rectangle flipped vertically, and then
    * return a pointer to the bottom of it with a negative stride.
    */
   if (flip_y) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (flip_y) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
   return;

fail:
   *out_map = NULL;
   *out_stride = 0;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);

   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}

/**
 * Round up the requested multisample count to the next supported sample size.
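 *
 * Illustrative example (the actual mode lists live in the screen code):
 * assuming intel_supported_msaa_modes() returns a descending, -1-terminated
 * list such as {8, 4, 0, -1}, a request for 6 samples is quantized up to 8,
 * a request for 3 becomes 4, and a request larger than every supported mode
 * (say 16 here) falls through and returns 0.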
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}

static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);
   MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat,
                                             GL_NONE, GL_NONE);
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!devinfo->must_use_separate_stencil);
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}

static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->screen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->NumStorageSamples = rb->NumSamples;
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_enum_to_string(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   MAX2(rb->NumSamples, 1));
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}

/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
}
static mesa_format
fallback_rgbx_to_rgba(struct intel_screen *screen, struct gl_renderbuffer *rb,
                      mesa_format original_format)
{
   mesa_format format = original_format;

   /* The base format and internal format must be derived from the user-visible
    * format (that is, the gl_config's format), even if we internally choose
    * a different format for the renderbuffer.  Otherwise, rendering may use
    * incorrect channel write masks.
    */
   rb->_BaseFormat = _mesa_get_format_base_format(original_format);
   rb->InternalFormat = rb->_BaseFormat;

   if (!screen->mesa_format_supports_render[original_format]) {
      /* The glRenderbufferStorage paths in core Mesa detect if the driver
       * does not support the user-requested format, and then search for
       * a fallback format.  The DRI code bypasses core Mesa, though, so we
       * do the fallbacks here.
       *
       * We must support MESA_FORMAT_R8G8B8X8 on Android because the Android
       * framework requires HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces.
       */
      format = _mesa_format_fallback_rgbx_to_rgba(original_format);
      assert(screen->mesa_format_supports_render[format]);
   }
   return format;
}

static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(planar buffers are not "
               "supported as render targets.)");
      return;
   }

   rb->Format = fallback_rgbx_to_rgba(brw->screen, rb, image->format);

   mesa_format chosen_format = rb->Format == image->format ?
      image->format : rb->Format;

   /* __DRIimage is opaque to the core so it has to be checked here */
   if (!brw->mesa_format_supports_render[chosen_format]) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);

   /* Disable creation of the miptree's aux buffers because the driver exposes
    * no EGL API to manage them. That is, there is no API for resolving the aux
    * buffer's content to the main buffer nor for invalidating the aux buffer's
    * content.
    */
   irb->mt = intel_miptree_create_for_dri_image(brw, image, GL_TEXTURE_2D,
                                                rb->Format, false);
   if (!irb->mt)
      return;

   rb->Width = image->width;
   rb->Height = image->height;
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}

/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   (void) ctx;
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create an intel_renderbuffer for a __DRIdrawable. This function is
 * unrelated to GL renderbuffers (that is, those created by
 * glGenRenderbuffers).
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_winsys_renderbuffer(struct intel_screen *screen,
                                 mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb)
      return NULL;

   struct gl_renderbuffer *rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->NumSamples = num_samples;
   rb->NumStorageSamples = num_samples;

   rb->Format = fallback_rgbx_to_rgba(screen, rb, format);

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to buffers shared with the
 * display server, which are created with intel_create_winsys_renderbuffer())
 * are handled much like user-created renderbuffers, but they have a resize
 * handler that may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(struct intel_screen *screen,
                                  mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_winsys_renderbuffer(screen, format, num_samples);
   if (irb)
      irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}

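/**
 * Point the renderbuffer wrapper at the given texture image's miptree so
 * that rendering to the texture goes through the normal renderbuffer paths.
 * Adjusts the level/layer for texture-view MinLevel/MinLayer and records the
 * layer count used for layered rendering.
 */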
static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (!layered) {
      irb->layer_count = 1;
   } else if (mt->target != GL_TEXTURE_3D && image->TexObject->NumLayers > 0) {
      irb->layer_count = image->TexObject->NumLayers;
   } else {
      irb->layer_count = mt->surf.dim == ISL_SURF_DIM_3D ?
                            minify(mt->surf.logical_level0_px.depth, level) :
                            mt->surf.logical_level0_px.array_len;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   return true;
}

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}

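/**
 * Mark the framebuffer as incomplete with the given error status, and log
 * the reason: through GL debug output when running in a debug context, and
 * always through the driver's DEBUG_FBO channel.
 */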
#define fbo_incomplete(fb, error_id, ...) do {                                \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debugf(ctx, &msg_id,                                        \
                         MESA_DEBUG_SOURCE_API,                               \
                         MESA_DEBUG_TYPE_OTHER,                               \
                         MESA_DEBUG_SEVERITY_MEDIUM,                          \
                         __VA_ARGS__);                                        \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = error_id;                                                 \
   } while (0)

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   unsigned i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (devinfo->gen >= 6) {
         const unsigned d_width = depth_mt->surf.phys_level0_sa.width;
         const unsigned d_height = depth_mt->surf.phys_level0_sa.height;
         const unsigned d_depth = depth_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     depth_mt->surf.phys_level0_sa.depth :
                                     depth_mt->surf.phys_level0_sa.array_len;

         const unsigned s_width = stencil_mt->surf.phys_level0_sa.width;
         const unsigned s_height = stencil_mt->surf.phys_level0_sa.height;
         const unsigned s_depth = stencil_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     stencil_mt->surf.phys_level0_sa.depth :
                                     stencil_mt->surf.phys_level0_sa.array_len;

         /* For gen >= 6, we are using the lod/minimum-array-element fields
          * and supporting layered rendering.  This means that we must restrict
          * the depth & stencil attachments to match in various more
          * restrictive ways (width, height, depth, LOD and layer).
          */
         if (d_width != s_width ||
             d_height != s_height ||
             d_depth != s_depth ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                           "FBO incomplete: separate stencil unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                           "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (devinfo->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                           "FBO incomplete: separate stencil without HiZ\n");
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                        "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                           "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                        "FBO incomplete: software rendering renderbuffer\n");
         continue;
      }

      if (rb->Format == MESA_FORMAT_R_SRGB8) {
         fbo_incomplete(fb, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT,
                        "FBO incomplete: Format not color renderable: %s\n",
                        _mesa_get_format_name(rb->Format));
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, GL_FRAMEBUFFER_UNSUPPORTED,
                        "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using the hardware blitter (BLT) engine.
 * We can do this when the blit is 1:1 (no scaling or mirroring), both
 * rectangles lie within their framebuffers, and scissoring is disabled.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      unsigned i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the bounds of their framebuffers and
       * there is no scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer.  "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (ctx->Color.sRGBEnabled &&
             _mesa_get_format_color_encoding(src_irb->mt->format) !=
             _mesa_get_format_color_encoding(dst_irb->mt->format)) {
            perf_debug("glBlitFramebuffer() with sRGB conversion cannot be "
                       "handled by BLT path.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, readFb->FlipY,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, drawFb->FlipY,
                                 dstX1 - dstX0, dstY1 - dstY0,
                                 COLOR_LOGICOP_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure.  "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       struct gl_framebuffer *readFb,
                       struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Page 679 of OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering in
    *     section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return;

   if (devinfo->gen < 6) {
      /* On gen4-5, try BLT first.
       *
       * Gen4-5 have a single ring for both 3D and BLT operations, so there
       * are no inter-ring synchronization issues like on Gen6+.  It is
       * apparently faster than using the 3D pipeline.  Original Gen4 also
       * has to rebase and copy miptree slices in order to render to
       * unaligned locations.
       */
      mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                                 srcX0, srcY0, srcX1, srcY1,
                                                 dstX0, dstY0, dstX1, dstY1,
                                                 mask);
      if (mask == 0x0)
         return;
   }

   mask = brw_blorp_framebuffer(brw, readFb, drawFb,
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
                                     srcX0, srcY0, srcX1, srcY1,
                                     dstX0, dstY0, dstX1, dstY1,
                                     mask, filter);
   if (mask == 0x0)
      return;

   if (devinfo->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) {
      assert(!"Invalid blit");
   }

   _swrast_BlitFramebuffer(ctx, readFb, drawFb,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           mask, filter);
}

/**
 * Does the renderbuffer have hiz enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}

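/**
 * Replace the renderbuffer's miptree with a freshly allocated temporary,
 * referenced through irb->align_wa_mt.  Unless @invalidate says the contents
 * are about to be overwritten, the current slice is copied into the
 * temporary first.  This is the rebase workaround mentioned above: original
 * Gen4 cannot render at arbitrary (unaligned) offsets within a miptree.
 */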
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_get_image_dims(rb->TexImage, &width, &height, &depth);

   assert(irb->align_wa_mt == NULL);
   new_mt = intel_miptree_create(brw, GL_TEXTURE_2D,
                                 intel_image->base.Base.TexFormat,
                                 0, 0,
                                 width, height, 1,
                                 irb->mt->surf.samples,
                                 MIPTREE_CREATE_BUSY);

   if (!invalidate)
      intel_miptree_copy_slice(brw, intel_image->mt,
                               intel_image->base.Base.Level, irb->mt_layer,
                               new_mt, 0, 0);

   intel_miptree_reference(&irb->align_wa_mt, new_mt);
   intel_miptree_release(&new_mt);

   irb->draw_x = 0;
   irb->draw_y = 0;
}

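/**
 * Forget every BO tracked in the render and depth caches.  Appropriate once
 * the tracked contents are coherent again, e.g. after the flushes emitted by
 * flush_depth_and_render_caches() below or when a batch completes.
 */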
void
brw_cache_sets_clear(struct brw_context *brw)
{
   hash_table_foreach(brw->render_cache, render_entry)
      _mesa_hash_table_remove(brw->render_cache, render_entry);

   set_foreach(brw->depth_cache, depth_entry)
      _mesa_set_remove(brw->depth_cache, depth_entry);
}

/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
static void
flush_depth_and_render_caches(struct brw_context *brw, struct brw_bo *bo)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }

   brw_cache_sets_clear(brw);
}

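/**
 * Flush if the BO about to be read was rendered to, or used as a depth
 * target, earlier in this batch, so the read sees coherent data.
 */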
void
brw_cache_flush_for_read(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo) ||
       _mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}

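/**
 * Pack a (format, aux usage) pair into a pointer-sized value for use as the
 * render cache hash-table data: the format lands in bits 8 and up, the aux
 * usage in the low 8 bits.  With illustrative values, format 37 with aux
 * usage 2 packs to (37 << 8) | 2 = 9474.
 */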
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}

void
brw_cache_flush_for_render(struct brw_context *brw, struct brw_bo *bo,
                           enum isl_format format,
                           enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_D is a subset of CCS_E).  However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out.  This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes.  We may as well be conservative
    * and flush on format changes too.  We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      flush_depth_and_render_caches(brw, bo);
}

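/**
 * Record that a BO is about to be rendered to with the given format and aux
 * usage.  In debug builds, assert that any earlier rendering to it in this
 * batch used the same (format, aux usage) pair; if not, a required
 * brw_cache_flush_for_render() call was missed.
 */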
void
brw_render_cache_add_bo(struct brw_context *brw, struct brw_bo *bo,
                        enum isl_format format,
                        enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert(brw->render_cache, bo,
                           format_aux_tuple(format, aux_usage));
}

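/**
 * Flush if the BO about to be used as a depth target was rendered to as a
 * color target earlier in this batch.
 */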
void
brw_cache_flush_for_depth(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}

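/** Record that a BO is about to be used as a depth target in this batch. */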
void
brw_depth_cache_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
   _mesa_set_add(brw->depth_cache, bo);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_hash_table_create(brw, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
   brw->depth_cache = _mesa_set_create(brw, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
}